// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or isochronous.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */

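/* the HCD's private data area stores a pointer back to our struct musb */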
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the TX FIFO flush fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, make sure TX urb(s) are queued
		 * when unplugging the USB device connected to the AM335x
		 * USB host port.
		 *
		 * Using a USB Ethernet device and running iperf (with the
		 * client on AM335x) has a very high chance of triggering it.
		 *
		 * It helps to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the TX channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

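/*
 * On an endpoint with a shared FIFO, the hardware serves only one
 * direction at a time, so both queue-head pointers track the same qh.
 */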
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue; the endpoint must
 * already be claimed by the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
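		/* SETUP packets are always 8 bytes */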
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
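	/*
	 * Drop the controller lock around the callback: the URB's
	 * completion handler may resubmit URBs and re-enter this driver.
	 */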
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;
	u16			toggle;

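	/* -EINPROGRESS means the URB completed normally; report success */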
	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		toggle = musb->io.get_toggle(qh, !is_in);
		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			fallthrough;

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
			hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	}
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
					struct musb_hw_ep *hw_ep, struct musb_qh *qh,
					struct urb *urb, u32 offset,
					u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table
		 * below
		 * bulk_split	hb_mult	Autoset_Enable
		 *	0	1	Yes (Normal)
		 *	0	>1	No (High BW ISO)
		 *	1	1	Yes (HS bulk)
		 *	1	>1	Yes (FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
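	/* record the chosen mode so later DMA completion handling can check it */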
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}

static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting the
	 * DMA transfer.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
			"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

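	/*
	 * A zero-length OUT transfer has nothing for DMA to move; just
	 * raise TXPKTRDY via PIO instead.
	 */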
	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default; note that we
			 * could be flushing valid packets in the double
			 * buffering case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered)
				csr |= musb->io.set_toggle(qh, is_out, urb);

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
					/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (can_bulk_split(musb, qh->type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) load_count = min((u32) hw_ep->max_packet_sz_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) load_count = min((u32) packet_sz, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (dma_channel && musb_tx_dma_program(dma_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) hw_ep, qh, urb, offset, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) load_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (load_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* PIO to load FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) qh->segsize = load_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) sg_miter_start(&qh->sg_miter, urb->sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) SG_MITER_ATOMIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) | SG_MITER_FROM_SG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (!sg_miter_next(&qh->sg_miter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) dev_err(musb->controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) "error: sg list empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) sg_miter_stop(&qh->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) buf = qh->sg_miter.addr + urb->sg->offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) load_count = min_t(u32, load_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) qh->sg_miter.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) musb_write_fifo(hw_ep, load_count, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) qh->sg_miter.consumed = load_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) sg_miter_stop(&qh->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) musb_write_fifo(hw_ep, load_count, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /* re-enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) musb_writew(mbase, MUSB_INTRTXE, int_txe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* IN/receive */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) u16 csr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (hw_ep->rx_reinit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) musb_rx_reinit(musb, qh, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) csr |= musb->io.set_toggle(qh, is_out, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (qh->type == USB_ENDPOINT_XFER_INT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) csr |= MUSB_RXCSR_DISNYET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (csr & (MUSB_RXCSR_RXPKTRDY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) | MUSB_RXCSR_DMAENAB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) | MUSB_RXCSR_H_REQPKT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ERR("broken !rx_reinit, ep%d csr %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) hw_ep->epnum, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* scrub any stale state, leaving toggle alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) csr &= MUSB_RXCSR_DISNYET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* kick things off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /* Candidate for DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) dma_channel->actual_len = 0L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) qh->segsize = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* AUTOREQ is in a DMA register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Unless caller treats short RX transfers as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * errors, we dare not queue multiple transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dma_ok = dma_controller->channel_program(dma_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) packet_sz, !(urb->transfer_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) URB_SHORT_NOT_OK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) urb->transfer_dma + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) qh->segsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!dma_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) dma_controller->channel_release(dma_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) hw_ep->rx_channel = dma_channel = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) csr |= MUSB_RXCSR_DMAENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) csr |= MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
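/*
 * A minimal standalone sketch (hypothetical helper, not called anywhere
 * in this driver) of the TXCSR teardown ordering used in
 * musb_ep_program() above: DMAENAB must be cleared by an earlier write
 * cycle than DMAMODE, never in the same write or a later one. Assumes
 * the endpoint is already selected and the controller lock is held.
 */
static inline void musb_h_clear_tx_dma_sketch(void __iomem *epio)
{
	u16 csr = musb_readw(epio, MUSB_TXCSR);

	/* first write: disable DMA requests, leave DMAMODE untouched */
	csr &= ~MUSB_TXCSR_DMAENAB;
	musb_writew(epio, MUSB_TXCSR, csr);

	/* second write: now DMAMODE may be cleared safely */
	csr &= ~MUSB_TXCSR_DMAMODE;
	musb_writew(epio, MUSB_TXCSR, csr);
}
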
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Move the current qh to the end of musb->in_bulk/out_bulk and schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * the next QH from that list; this avoids starving other bulk endpoints.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct dma_channel *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) void __iomem *mbase = musb->mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) void __iomem *epio = ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct musb_qh *cur_qh, *next_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) u16 rx_csr, tx_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) u16 toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) musb_ep_select(mbase, ep->epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) dma = is_dma_capable() ? ep->rx_channel : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * Stop the transaction by clearing REQPKT first and then the NAK Timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * bit, per the MUSBMHDRC USB 2.0 HIGH-SPEED DUAL-ROLE CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Programmer's Guide, section 9.2.2 (sketched after this function).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) rx_csr = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) rx_csr |= MUSB_RXCSR_H_WZC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) rx_csr &= ~MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) musb_writew(epio, MUSB_RXCSR, rx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rx_csr &= ~MUSB_RXCSR_DATAERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) musb_writew(epio, MUSB_RXCSR, rx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) cur_qh = first_qh(&musb->in_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dma = is_dma_capable() ? ep->tx_channel : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* clear nak timeout bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) tx_csr = musb_readw(epio, MUSB_TXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) tx_csr |= MUSB_TXCSR_H_WZC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) musb_writew(epio, MUSB_TXCSR, tx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) cur_qh = first_qh(&musb->out_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (cur_qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) urb = next_urb(cur_qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) dma->status = MUSB_DMA_STATUS_CORE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) musb->dma_controller->channel_abort(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) urb->actual_length += dma->actual_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) dma->actual_len = 0L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) toggle = musb->io.get_toggle(cur_qh, !is_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /* move cur_qh to end of queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) list_move_tail(&cur_qh->ring, &musb->in_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* get the next qh from musb->in_bulk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) next_qh = first_qh(&musb->in_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* set rx_reinit and schedule the next qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ep->rx_reinit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) /* move cur_qh to end of queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) list_move_tail(&cur_qh->ring, &musb->out_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* get the next qh from musb->out_bulk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) next_qh = first_qh(&musb->out_bulk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* set tx_reinit and schedule the next qh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ep->tx_reinit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (next_qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) musb_start_urb(musb, is_in, next_qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
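/*
 * A minimal standalone sketch (hypothetical helper, not called by this
 * driver) of the RX recovery ordering used in musb_bulk_nak_timeout()
 * above: one write stops the IN token (REQPKT), a second write clears
 * the NAK timeout (DATAERROR) status. Setting the write-zero-to-clear
 * bits first keeps both writes from acking unrelated status.
 */
static inline void musb_h_rx_nak_recover_sketch(void __iomem *epio)
{
	u16 csr = musb_readw(epio, MUSB_RXCSR);

	csr |= MUSB_RXCSR_H_WZC_BITS;	/* preserve write-zero-to-clear bits */
	csr &= ~MUSB_RXCSR_H_REQPKT;
	musb_writew(epio, MUSB_RXCSR, csr);	/* stop the transaction first */

	csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, csr);	/* then clear the NAK timeout */
}
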
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * Service the default endpoint (ep0) as host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * Return true until it's time to start the status stage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) bool more = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u8 *fifo_dest = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) u16 fifo_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct musb_hw_ep *hw_ep = musb->control_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct musb_qh *qh = hw_ep->in_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct usb_ctrlrequest *request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) switch (musb->ep0_stage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) case MUSB_EP0_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) fifo_dest = urb->transfer_buffer + urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (fifo_count < len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) urb->status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) musb_read_fifo(hw_ep, fifo_count, fifo_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) urb->actual_length += fifo_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (len < qh->maxpacket) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* always terminate on short read; it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * rarely reported as an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) } else if (urb->actual_length <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) urb->transfer_buffer_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) more = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case MUSB_EP0_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) request = (struct usb_ctrlrequest *) urb->setup_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (!request->wLength) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) musb_dbg(musb, "start no-DATA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) } else if (request->bRequestType & USB_DIR_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) musb_dbg(musb, "start IN-DATA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) musb->ep0_stage = MUSB_EP0_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) more = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) musb_dbg(musb, "start OUT-DATA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) musb->ep0_stage = MUSB_EP0_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) more = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) case MUSB_EP0_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) fifo_count = min_t(size_t, qh->maxpacket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) urb->transfer_buffer_length -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (fifo_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) fifo_dest = (u8 *) (urb->transfer_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) + urb->actual_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) fifo_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) (fifo_count == 1) ? "" : "s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) fifo_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) musb_write_fifo(hw_ep, fifo_count, fifo_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) urb->actual_length += fifo_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) more = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ERR("bogus ep0 stage %d\n", musb->ep0_stage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
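/*
 * A minimal sketch (hypothetical helper, not used by this driver) of the
 * DATA stage decision in musb_h_ep0_continue() above: a zero wLength
 * means there is no DATA stage at all (the stage stays at MUSB_EP0_START
 * and the caller then flags the status stage); otherwise the direction
 * bit of bRequestType picks IN versus OUT. Returns one of the
 * MUSB_EP0_* stage values as an int.
 */
static inline int musb_ep0_data_stage_sketch(const struct usb_ctrlrequest *request)
{
	if (!request->wLength)
		return MUSB_EP0_START;	/* no DATA stage */
	return (request->bRequestType & USB_DIR_IN)
			? MUSB_EP0_IN : MUSB_EP0_OUT;
}
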
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Handle the default endpoint interrupt as host. Called only at IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * time, from musb_interrupt().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * Called with the controller irqlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) irqreturn_t musb_h_ep0_irq(struct musb *musb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) u16 csr, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) void __iomem *mbase = musb->mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct musb_hw_ep *hw_ep = musb->control_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct musb_qh *qh = hw_ep->in_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) bool complete = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) irqreturn_t retval = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /* ep0 only has one queue, "in" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) urb = next_urb(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) musb_ep_select(mbase, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) csr = musb_readw(epio, MUSB_CSR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) len = (csr & MUSB_CSR0_RXPKTRDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ? musb_readb(epio, MUSB_COUNT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) csr, qh, len, urb, musb->ep0_stage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /* if we just did status stage, we are done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (MUSB_EP0_STATUS == musb->ep0_stage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) retval = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) complete = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /* prepare status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (csr & MUSB_CSR0_H_RXSTALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) musb_dbg(musb, "STALLING ENDPOINT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) } else if (csr & MUSB_CSR0_H_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) musb_dbg(musb, "no response, csr0 %04x", csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) musb_dbg(musb, "control NAK timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* NOTE: this code path would be a good place to PAUSE a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * control transfer, if another one is queued, so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * ep0 is more likely to stay busy. That's already done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * for bulk RX transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * if (qh->ring.next != &musb->control), then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * we have a candidate... NAKing is *NOT* an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) musb_writew(epio, MUSB_CSR0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) retval = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) musb_dbg(musb, "aborting");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) retval = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) urb->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) complete = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* use the proper sequence to abort the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (csr & MUSB_CSR0_H_REQPKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) csr &= ~MUSB_CSR0_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) musb_writew(epio, MUSB_CSR0, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) musb_writew(epio, MUSB_CSR0, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) musb_h_ep0_flush_fifo(hw_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) musb_writeb(epio, MUSB_NAKLIMIT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) /* clear it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) musb_writew(epio, MUSB_CSR0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (unlikely(!urb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /* stop endpoint since we have no place for its data; this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * SHOULD NEVER HAPPEN! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ERR("no URB for end 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) musb_h_ep0_flush_fifo(hw_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* call common logic and prepare response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (musb_h_ep0_continue(musb, len, urb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /* more packets required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) csr = (MUSB_EP0_IN == musb->ep0_stage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /* data transfer complete; perform status phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (usb_pipeout(urb->pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) || !urb->transfer_buffer_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) csr = MUSB_CSR0_H_STATUSPKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) | MUSB_CSR0_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) csr = MUSB_CSR0_H_STATUSPKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) | MUSB_CSR0_TXPKTRDY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* disable ping token in status phase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) csr |= MUSB_CSR0_H_DIS_PING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* flag status stage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) musb->ep0_stage = MUSB_EP0_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) musb_writew(epio, MUSB_CSR0, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) retval = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) musb->ep0_stage = MUSB_EP0_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) /* call completion handler if done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) musb_advance_schedule(musb, urb, hw_ep, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
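/*
 * A minimal sketch (hypothetical helper, not used by this driver) of the
 * status-stage CSR0 value chosen at the end of musb_h_ep0_irq() above:
 * the status packet travels opposite to the DATA stage (REQPKT requests
 * an IN status packet, TXPKTRDY sends an OUT one), and PING is disabled
 * for the status phase either way.
 */
static inline u16 musb_ep0_status_csr_sketch(const struct urb *urb)
{
	u16 csr = MUSB_CSR0_H_STATUSPKT | MUSB_CSR0_H_DIS_PING;

	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		csr |= MUSB_CSR0_H_REQPKT;	/* IN status packet */
	else
		csr |= MUSB_CSR0_TXPKTRDY;	/* OUT status packet */
	return csr;
}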
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #ifdef CONFIG_USB_INVENTRA_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /* Host side TX (OUT) using Mentor DMA works as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) submit_urb ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) - if queue was empty, Program Endpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) - ... which starts DMA to fifo in mode 1 or 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) DMA Isr (transfer complete) -> TxAvail()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) only in musb_cleanup_urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) - TxPktRdy has to be set in mode 0 or for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) short packets in mode 1 (see the sketch below).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
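/*
 * A minimal sketch (hypothetical helper, not part of this driver) of the
 * TxPktRdy rule stated in the comment above: after TX DMA completes, the
 * CPU must still set TXPKTRDY itself -- always in mode 0, and in mode 1
 * only when the transfer ends with a short packet (i.e. its length is
 * not a multiple of maxpacket).
 */
static inline bool musb_tx_dma_needs_txpktrdy_sketch(bool dma_mode1,
						     size_t length, u16 maxpacket)
{
	if (!dma_mode1)
		return true;			/* mode 0: every packet */
	return length % maxpacket != 0;		/* mode 1: short tail only */
}
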
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* Service a Tx-Available or dma completion irq for the endpoint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) void musb_host_tx(struct musb *musb, u8 epnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) int pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) u16 tx_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) size_t length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) size_t offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct musb_qh *qh = hw_ep->out_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct urb *urb = next_urb(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) u32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) void __iomem *mbase = musb->mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct dma_channel *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) bool transfer_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) tx_csr = musb_readw(epio, MUSB_TXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* with CPPI, DMA sometimes triggers "extra" irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (!urb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) pipe = urb->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) trace_musb_urb_tx(musb, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) dma ? ", dma" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /* check for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* dma was disabled, fifo flushed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) musb_dbg(musb, "TX end %d stall", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* stall; record URB status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* (NON-ISO) dma was disabled, fifo flushed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) status = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) && !list_is_singular(&musb->out_bulk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) musb_bulk_nak_timeout(musb, hw_ep, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) musb_dbg(musb, "TX ep%d device not responding", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* NOTE: this code path would be a good place to PAUSE a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * transfer, if there's some other (nonperiodic) tx urb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * that could use this fifo. (dma complicates it...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * That's already done for bulk RX transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * if (bulk && qh->ring.next != &musb->out_bulk), then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * we have a candidate... NAKing is *NOT* an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) musb_writew(epio, MUSB_TXCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) MUSB_TXCSR_H_WZC_BITS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) | MUSB_TXCSR_TXPKTRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) dma->status = MUSB_DMA_STATUS_CORE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) musb->dma_controller->channel_abort(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* do the proper sequence to abort the transfer in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) * usb core; the dma engine should already be stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) musb_h_tx_flush_fifo(hw_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) tx_csr &= ~(MUSB_TXCSR_AUTOSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) | MUSB_TXCSR_DMAENAB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) | MUSB_TXCSR_H_ERROR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) | MUSB_TXCSR_H_RXSTALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) | MUSB_TXCSR_H_NAKTIMEOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) musb_writew(epio, MUSB_TXCSR, tx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* REVISIT may need to clear FLUSHFIFO ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) musb_writew(epio, MUSB_TXCSR, tx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) musb_writeb(epio, MUSB_TXINTERVAL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /* second cppi case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (is_dma_capable() && dma && !status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * DMA has completed. But if we're using DMA mode 1 (multi-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * packet DMA), we need a terminal TXPKTRDY interrupt before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * we can consider this transfer completed, lest we trash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * its last packet when writing the next URB's data. So we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * switch back to mode 0 to get that interrupt; we'll come
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * back here once it happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (tx_csr & MUSB_TXCSR_DMAMODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * We shouldn't clear DMAMODE with DMAENAB set; so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * clear them in a safe order. That should be OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * once TXPKTRDY has been set (and I've never seen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * it being 0 at this moment -- DMA interrupt latency
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * is significant) but if it hasn't been then we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * no choice but to stop being polite and ignore the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * programmer's guide... :-)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * Note that we must write TXCSR with TXPKTRDY cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * in order not to re-trigger the packet send (this bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * can't be cleared by CPU), and there's another caveat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * TXPKTRDY may be set shortly and then cleared in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * double-buffered FIFO mode, so we do an extra TXCSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * read for debouncing...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) tx_csr &= musb_readw(epio, MUSB_TXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) tx_csr &= ~(MUSB_TXCSR_DMAENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) MUSB_TXCSR_TXPKTRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) musb_writew(epio, MUSB_TXCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) tx_csr | MUSB_TXCSR_H_WZC_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) tx_csr &= ~(MUSB_TXCSR_DMAMODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) MUSB_TXCSR_TXPKTRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) musb_writew(epio, MUSB_TXCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) tx_csr | MUSB_TXCSR_H_WZC_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * There is no guarantee that we'll get an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * after clearing DMAMODE as we might have done this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * too late (after TXPKTRDY was cleared by controller).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * Re-read TXCSR as we have spoiled its previous value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) tx_csr = musb_readw(epio, MUSB_TXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * We may get here from a DMA completion or TXPKTRDY interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * In any case, we must check the FIFO status here and bail out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * only if the FIFO still has data -- that should prevent the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * "missed" TXPKTRDY interrupts and deal with double-buffered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * FIFO mode too...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) musb_dbg(musb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) "DMA complete but FIFO not empty, CSR %04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) tx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (!status || dma || usb_pipeisoc(pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) length = dma->actual_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) length = qh->segsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) qh->offset += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (usb_pipeisoc(pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct usb_iso_packet_descriptor *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) d = urb->iso_frame_desc + qh->iso_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) d->actual_length = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) d->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (++qh->iso_idx >= urb->number_of_packets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) d++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) offset = d->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) length = d->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) } else if (dma && urb->transfer_buffer_length == qh->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* see if we need to send more data or a ZLP (sketched after this function) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (qh->segsize < qh->maxpacket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) else if (qh->offset == urb->transfer_buffer_length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) && !(urb->transfer_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) & URB_ZERO_PACKET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) offset = qh->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) length = urb->transfer_buffer_length - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) transfer_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* urb->status != -EINPROGRESS means request has been faulted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * so we must abort this transfer after cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (urb->status != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) status = urb->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /* set status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) urb->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) urb->actual_length = qh->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) offset, length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) musb_h_tx_dma_start(hw_ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) musb_dbg(musb, "not complete, but DMA enabled?");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * PIO: start next packet in this URB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * REVISIT: some docs say that when hw_ep->tx_double_buffered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) * (and presumably, FIFO is not half-full) we should write *two*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) * packets before updating TXCSR; other docs disagree...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (length > qh->maxpacket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) length = qh->maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* Unmap the buffer so that CPU can use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * We need to use the sg list when the URB provides
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * no transfer_buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (!urb->transfer_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* sg_miter_start is already done in musb_ep_program */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!sg_miter_next(&qh->sg_miter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) dev_err(musb->controller, "error: sg list empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) sg_miter_stop(&qh->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) length = min_t(u32, length, qh->sg_miter.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) qh->sg_miter.consumed = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) sg_miter_stop(&qh->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) qh->segsize = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) musb_writew(epio, MUSB_TXCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
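/*
 * A minimal sketch (hypothetical helper, not called by this driver) of
 * the "is this OUT URB finished?" test from musb_host_tx() above: a
 * short packet always terminates the transfer, and a full-length final
 * packet terminates it unless URB_ZERO_PACKET asks for a trailing ZLP.
 */
static inline bool musb_h_tx_urb_done_sketch(const struct urb *urb,
					     size_t offset, size_t segsize,
					     u16 maxpacket)
{
	if (segsize < maxpacket)
		return true;			/* short packet ends it */
	return offset == urb->transfer_buffer_length &&
	       !(urb->transfer_flags & URB_ZERO_PACKET);
}
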
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) #ifdef CONFIG_USB_TI_CPPI41_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* Seems to set up ISO for cppi41 and not advance len. See commit c57c41d */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct dma_channel *channel = hw_ep->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) dma_addr_t *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) (u32)urb->transfer_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) length = urb->iso_frame_desc[qh->iso_idx].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) val = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) val |= MUSB_RXCSR_DMAENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) musb_writew(hw_ep->regs, MUSB_RXCSR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return dma->channel_program(channel, qh->maxpacket, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) (u32)buf, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
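/*
 * A minimal sketch (hypothetical helper, not used by this driver) of the
 * per-frame DMA address computed in musb_rx_dma_iso_cppi41() above: the
 * URB's DMA handle plus the byte offset recorded in that frame's ISO
 * descriptor, without the intermediate pointer casts.
 */
static inline dma_addr_t musb_iso_frame_dma_sketch(const struct urb *urb,
						   int iso_idx)
{
	return urb->transfer_dma + urb->iso_frame_desc[iso_idx].offset;
}
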
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) defined(CONFIG_USB_TI_CPPI41_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Host side RX (IN) using Mentor DMA works as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) submit_urb ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) - if queue was empty, ProgramEndpoint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) - first IN token is sent out (by setting ReqPkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) LinuxIsr -> RxReady()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /\ => first packet is received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) | - Set in mode 0 (DmaEnab, ~ReqPkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) | -> DMA Isr (transfer complete) -> RxReady()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) | - if urb not complete, send next IN token (ReqPkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) | | else complete urb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) | |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) ---------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * Nuances of mode 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * For short packets, no ack (+RxPktRdy) is sent automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * (even if AutoClear is ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * For full packets, the ack (~RxPktRdy) and the next IN token (+ReqPkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * are sent automatically => major problem, as collecting the next packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * becomes difficult. Hence mode 1 is not used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * REVISIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * All we care about at this driver level is that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * (b) termination conditions are: short RX, or buffer full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * (c) fault modes include
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * (and that endpoint's dma queue stops immediately)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * - overflow (full, PLUS more bytes in the terminal packet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * thus be a great candidate for using mode 1 ... for all but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * last packet of one URB's transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) struct dma_channel *channel = hw_ep->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) int pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) bool done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) pipe = urb->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (usb_pipeisoc(pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct usb_iso_packet_descriptor *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) d = urb->iso_frame_desc + qh->iso_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) d->actual_length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) /* even if there was an error, we did the dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * for iso_frame_desc->length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (d->status != -EILSEQ && d->status != -EOVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) d->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (++qh->iso_idx >= urb->number_of_packets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /* REVISIT: Why ignore return value here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (musb_dma_cppi41(hw_ep->musb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) urb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* done if urb buffer is full or a short packet is received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) done = (urb->actual_length + len >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) urb->transfer_buffer_length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) || channel->actual_len < qh->maxpacket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) || channel->rx_packet_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /* send IN token for next packet, without AUTOREQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) val = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) val |= MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /* Disadvantage of using mode 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * It's basically usable only for mass storage class; essentially all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * other protocols also terminate transfers on short packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * Details:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * An extra IN token is sent at the end of the transfer (due to AUTOREQ).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * If you try to use mode 1 for (transfer_buffer_length - 512), and try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * to use the extra IN token to grab the last packet using mode 0, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * problem is that you cannot be sure when the device will send the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * last packet and set RxPktRdy. Sometimes the packet arrives too soon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * and gets lost when RxCSR is re-programmed at the end of the mode 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * transfer; sometimes it arrives just a little late, so that if you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * configure for mode 0 right after the mode 1 transfer completes, you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * will find RxCount 0. You might think you could simply wait for an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * interrupt when the packet is received -- but none will arrive!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) u8 iso_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct musb *musb = hw_ep->musb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct dma_channel *channel = hw_ep->rx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u16 rx_count, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) int length, pipe, done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) dma_addr_t buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) rx_count = musb_readw(epio, MUSB_RXCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) pipe = urb->pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (usb_pipeisoc(pipe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) int d_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) struct usb_iso_packet_descriptor *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) d = urb->iso_frame_desc + qh->iso_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (iso_err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) d_status = -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (rx_count > d->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (d_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) d_status = -EOVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) urb->error_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) musb_dbg(musb, "** OVERFLOW %d into %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) rx_count, d->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) length = d->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) length = rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) d->status = d_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) buf = urb->transfer_dma + d->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) length = rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) buf = urb->transfer_dma + urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) channel->desired_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) #ifdef USE_MODE1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /* because of the issue described above, mode 1 will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * only rarely behave with correct semantics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if ((urb->transfer_flags & URB_SHORT_NOT_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) && (urb->transfer_buffer_length - urb->actual_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) > qh->maxpacket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) channel->desired_mode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (rx_count < hw_ep->max_packet_sz_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) length = rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) channel->desired_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) length = urb->transfer_buffer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /* See comments above on disadvantages of using mode 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) val = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) val &= ~MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (channel->desired_mode == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) val &= ~MUSB_RXCSR_H_AUTOREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) val |= MUSB_RXCSR_H_AUTOREQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) val |= MUSB_RXCSR_DMAENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) /* autoclear shouldn't be set in high bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (qh->hb_mult == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) val |= MUSB_RXCSR_AUTOCLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* REVISIT: if actual_length != 0, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * transfer_buffer_length needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * adjusted first...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) done = dma->channel_program(channel, qh->maxpacket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) channel->desired_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) buf, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) dma->channel_release(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) hw_ep->rx_channel = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) channel = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) val = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) val &= ~(MUSB_RXCSR_DMAENAB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) | MUSB_RXCSR_H_AUTOREQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) | MUSB_RXCSR_AUTOCLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) musb_writew(epio, MUSB_RXCSR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) u8 iso_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) #endif
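^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * Illustrative sketch only, kept out of the build with #if 0: one way a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * mass-storage-style path might use mode 1 as described above -- program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * mode 1 DMA for everything except the final packet, then try to collect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * that packet in mode 0. The helper name example_rx_mode1_split() is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * hypothetical; the accessors and RXCSR bits are the ones used elsewhere
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * in this file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static int example_rx_mode1_split(struct dma_controller *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct musb_hw_ep *hw_ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct urb *urb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) size_t bulk_len = urb->transfer_buffer_length - qh->maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) u16 val = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) /* mode 1: AUTOREQ keeps IN tokens flowing without per-packet irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) val |= MUSB_RXCSR_H_AUTOREQ | MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /* all but the last packet via mode 1 ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) if (!dma->channel_program(hw_ep->rx_channel, qh->maxpacket, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) urb->transfer_dma, bulk_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /* ... and the final packet would then be fetched in mode 0. As the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * comment above explains, there is no safe point for this switch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * the packet may already be lost, or RxCount may still read zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * and no interrupt announces its arrival.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) #endif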
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * and high-bandwidth IN transfer cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) void musb_host_rx(struct musb *musb, u8 epnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct urb *urb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) struct dma_controller *c = musb->dma_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) void __iomem *epio = hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct musb_qh *qh = hw_ep->in_qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) size_t xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) void __iomem *mbase = musb->mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) u16 rx_csr, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) bool iso_err = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) bool done = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct dma_channel *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) urb = next_urb(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) xfer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) rx_csr = musb_readw(epio, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) val = rx_csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (unlikely(!urb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * usbtest #11 (unlinks) triggers it regularly, sometimes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * with fifo full. (Only with DMA??)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) epnum, val, musb_readw(epio, MUSB_RXCOUNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) trace_musb_urb_rx(musb, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /* check for errors; concurrent stall & unlink are not really
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * handled yet! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) musb_dbg(musb, "RX end %d STALL", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /* stall; record URB status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) status = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) dev_err(musb->controller, "ep%d RX three-strikes error", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * The three-strikes error can only happen when the USB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * device is not accessible, for example detached or powered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * off. Return the fatal error -ESHUTDOWN, so that USB device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * drivers hopefully won't immediately resubmit the same URB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) status = -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) musb_writeb(epio, MUSB_RXINTERVAL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) rx_csr &= ~MUSB_RXCSR_H_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) musb_writew(epio, MUSB_RXCSR, rx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (USB_ENDPOINT_XFER_ISOC != qh->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) musb_dbg(musb, "RX end %d NAK timeout", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) /* NOTE: NAKing is *NOT* an error, so we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * continue. Except ... if there's a request for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * another QH, use that instead of starving it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * Devices like Ethernet and serial adapters keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) * reads posted at all times, which will starve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * other devices without this logic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (usb_pipebulk(urb->pipe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) && qh->mux == 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) && !list_is_singular(&musb->in_bulk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) musb_bulk_nak_timeout(musb, hw_ep, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) rx_csr |= MUSB_RXCSR_H_WZC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) rx_csr &= ~MUSB_RXCSR_DATAERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) musb_writew(epio, MUSB_RXCSR, rx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) musb_dbg(musb, "RX end %d ISO data error", epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) /* packet error reported later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) iso_err = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /* faults abort the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /* clean up dma and collect transfer count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) dma->status = MUSB_DMA_STATUS_CORE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) musb->dma_controller->channel_abort(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) xfer_len = dma->actual_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) musb_writeb(epio, MUSB_RXINTERVAL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* thorough shutdown for now ... given more precise fault handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * and better queueing support, we might keep a DMA pipeline going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * while processing this irq for earlier completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /* FIXME this is _way_ too much in-line logic for Mentor DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) (rx_csr & MUSB_RXCSR_H_REQPKT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /* REVISIT this happened for a while on some short reads...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * the cleanup still needs investigation... looks bad...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * and also duplicates dma cleanup code above ... plus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * shouldn't this be the "half full" double buffer case?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) dma->status = MUSB_DMA_STATUS_CORE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) musb->dma_controller->channel_abort(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) xfer_len = dma->actual_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) xfer_len, dma ? ", dma" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) rx_csr &= ~MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) musb_writew(epio, MUSB_RXCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) MUSB_RXCSR_H_WZC_BITS | rx_csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
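^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * DMA completion path: collect the byte count from the channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * clear the DMA-related RXCSR bits, then let the controller-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * helper decide whether the URB is done or another packet is needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) */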
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) xfer_len = dma->actual_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) val &= ~(MUSB_RXCSR_DMAENAB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) | MUSB_RXCSR_H_AUTOREQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) | MUSB_RXCSR_AUTOCLEAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) | MUSB_RXCSR_RXPKTRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) musb_writew(hw_ep->regs, MUSB_RXCSR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) musb_dma_cppi41(musb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) musb_dbg(hw_ep->musb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) "ep %d dma %s, rxcsr %04x, rxcount %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) epnum, done ? "off" : "reset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) musb_readw(epio, MUSB_RXCSR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) musb_readw(epio, MUSB_RXCOUNT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) } else if (urb->status == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* if no errors, be sure a packet is ready for unloading */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) status = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) ERR("Rx interrupt with no errors or packet!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) /* FIXME this is another "SHOULD NEVER HAPPEN" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* SCRUB (RX) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /* do the proper sequence to abort the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) musb_ep_select(mbase, epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) val &= ~MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) musb_writew(epio, MUSB_RXCSR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* we are expecting IN packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) musb_dma_cppi41(musb)) && dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) musb_dbg(hw_ep->musb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) "RX%d count %d, buffer 0x%llx len %d/%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) epnum, musb_readw(epio, MUSB_RXCOUNT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) (unsigned long long) urb->transfer_dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) + urb->actual_length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) qh->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) urb->transfer_buffer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) xfer_len, iso_err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) dev_err(musb->controller, "error: rx_dma failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (!dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) unsigned int received_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /* Unmap the buffer so that the CPU can use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * We need to map sg if the transfer_buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (!urb->transfer_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) qh->use_sg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) sg_miter_start(&qh->sg_miter, urb->sg, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) sg_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (qh->use_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (!sg_miter_next(&qh->sg_miter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) dev_err(musb->controller, "error: sg list empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) sg_miter_stop(&qh->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) goto finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) urb->transfer_buffer = qh->sg_miter.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) received_len = urb->actual_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) qh->offset = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) done = musb_host_packet_rx(musb, urb, epnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) iso_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /* Calculate the number of bytes received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) received_len = urb->actual_length -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) received_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) qh->sg_miter.consumed = received_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) sg_miter_stop(&qh->sg_miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) done = musb_host_packet_rx(musb, urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) epnum, iso_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) musb_dbg(musb, "read %spacket", done ? "last " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) urb->actual_length += xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) qh->offset += xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (qh->use_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) qh->use_sg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) urb->transfer_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (urb->status == -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) urb->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) }
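^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * Minimal reference sketch (not built) of the sg_miter pattern used in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * the PIO branch above: map one scatterlist segment at a time, fill it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * record how much was consumed, then stop the iterator so the mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * is released. The iterator calls are from <linux/scatterlist.h>; only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * example_drain_fifo() is hypothetical.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static void example_sg_rx(struct scatterlist *sg, unsigned int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) void __iomem *fifo, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct sg_miter miter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) sg_miter_start(&miter, sg, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) while (len && sg_miter_next(&miter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) unsigned int chunk = min_t(unsigned int, len, miter.length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /* hypothetical stand-in for musb_read_fifo() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) example_drain_fifo(fifo, miter.addr, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) miter.consumed = chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) len -= chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) sg_miter_stop(&miter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) #endif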
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) /* Schedule nodes correspond to peripheral endpoints, like an OHCI QH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * The software schedule associates multiple such nodes with a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * host side hardware endpoint + direction; scheduling may activate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * that hardware endpoint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) static int musb_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct musb *musb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) struct musb_qh *qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) int is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) int idle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) int best_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) int best_end, epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) struct musb_hw_ep *hw_ep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) struct list_head *head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) u8 toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) u8 txtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct urb *urb = next_urb(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) /* use fixed hardware for control and bulk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) head = &musb->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) hw_ep = musb->control_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /* else, periodic transfers get muxed to other endpoints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) * We know this qh hasn't been scheduled, so all we need to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * is choose which hardware endpoint to put it on ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * REVISIT what we really want here is a regular schedule tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * like e.g. OHCI uses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) best_diff = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) best_end = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
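^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * Worked example of the best-fit search below (illustrative numbers):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * with free endpoints whose RX FIFOs hold 512, 1024 and 4096 bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * a qh needing maxpacket 512 * hb_mult 2 = 1024 bytes gives diff of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * -512, 0 and 3072 respectively; the 1024-byte endpoint wins with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * smallest non-negative diff, i.e. the tightest fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) */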
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) for (epnum = 1, hw_ep = musb->endpoints + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) epnum < musb->nr_endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) epnum++, hw_ep++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) int diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (musb_ep_get_qh(hw_ep, is_in) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (hw_ep == musb->bulk_ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) if (is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) diff = hw_ep->max_packet_sz_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) diff = hw_ep->max_packet_sz_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) diff -= (qh->maxpacket * qh->hb_mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (diff >= 0 && best_diff > diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * The Mentor controller has a bug: if we schedule a BULK TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * transfer on an endpoint that earlier handled ISOC, the BULK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * transfer has to start on a zero toggle. If it starts on a 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * toggle, the transfer will fail, as the controller starts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * BULK transfer on a 0 toggle irrespective of how the toggle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * bits in the TXCSR register are programmed. Check for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * condition while allocating the EP for a TX BULK transfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * and if it holds, skip this EP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) hw_ep = musb->endpoints + epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) >> 4) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) best_diff = diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) best_end = epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* use bulk reserved ep1 if no other ep is free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) hw_ep = musb->bulk_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (is_in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) head = &musb->in_bulk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) head = &musb->out_bulk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /* Enable the bulk RX/TX NAK timeout scheme when bulk requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * are multiplexed. This scheme does not work in high- to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * full-speed scenarios, since NAK interrupts do not arrive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * for a full-speed device connected through a high-speed hub.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * The NAK timeout interval is 8 (128 uframes, i.e. 16 ms) for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * HS and 4 (8 frames, i.e. 8 ms) for FS devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (qh->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) qh->intv_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) } else if (best_end < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) dev_err(musb->controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) "%s hwep alloc failed for %dx%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) musb_ep_xfertype_string(qh->type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) qh->hb_mult, qh->maxpacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) idle = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) qh->mux = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) hw_ep = musb->endpoints + best_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) idle = list_empty(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) list_add_tail(&qh->ring, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) qh->mux = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) qh->hw_ep = hw_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) qh->hep->hcpriv = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) musb_start_urb(musb, is_in, qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) static int musb_urb_enqueue(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct usb_hcd *hcd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct urb *urb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) gfp_t mem_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct musb *musb = hcd_to_musb(hcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) struct usb_host_endpoint *hep = urb->ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) struct musb_qh *qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) struct usb_endpoint_descriptor *epd = &hep->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) unsigned type_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) unsigned interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /* host role must be active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (!is_host_active(musb) || !musb->is_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) trace_musb_urb_enq(musb, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) spin_lock_irqsave(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) ret = usb_hcd_link_urb_to_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) qh = ret ? NULL : hep->hcpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) urb->hcpriv = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) spin_unlock_irqrestore(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) /* DMA mapping was already done, if needed, and this urb is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * hep->urb_list now ... so we're done, unless hep wasn't yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * scheduled onto a live qh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * REVISIT best to keep hep->hcpriv valid until the endpoint gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * disabled, testing for empty qh->ring and avoiding qh setup costs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * except for the first urb queued after a config change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (qh || ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /* Allocate and initialize qh, minimizing the work done each time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * REVISIT consider a dedicated qh kmem_cache, so it's harder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * for bugs in other kernel code to break this driver...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) qh = kzalloc(sizeof *qh, mem_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (!qh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) spin_lock_irqsave(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) usb_hcd_unlink_urb_from_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) spin_unlock_irqrestore(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) qh->hep = hep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) qh->dev = urb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) INIT_LIST_HEAD(&qh->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) qh->is_ready = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) qh->maxpacket = usb_endpoint_maxp(epd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) qh->type = usb_endpoint_type(epd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) /* Bits 11 & 12 of wMaxPacketSize encode the high bandwidth multiplier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * Some musb cores don't support high bandwidth ISO transfers, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * we don't (yet!) support high bandwidth interrupt transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) qh->hb_mult = usb_endpoint_maxp_mult(epd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (qh->hb_mult > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) || (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (!ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) dev_err(musb->controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) "high bandwidth %s (%dx%d) not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) musb_ep_xfertype_string(qh->type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) qh->hb_mult, qh->maxpacket & 0x7ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) ret = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) qh->maxpacket &= 0x7ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) qh->epnum = usb_endpoint_num(epd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) /* precompute rxtype/txtype/type0 register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) type_reg = (qh->type << 4) | qh->epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) switch (urb->dev->speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) case USB_SPEED_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) type_reg |= 0xc0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) case USB_SPEED_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) type_reg |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) type_reg |= 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) }
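^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /* bits 7:6 of TXTYPE/RXTYPE carry the target device's operating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) * speed, per the switch above: 0xc0 = low, 0x80 = full, 0x40 = high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) */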
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) qh->type_reg = type_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /* Precompute RXINTERVAL/TXINTERVAL register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) switch (qh->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) case USB_ENDPOINT_XFER_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * Full/low speeds use the linear encoding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * high speed uses the logarithmic encoding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (urb->dev->speed <= USB_SPEED_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) interval = max_t(u8, epd->bInterval, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) case USB_ENDPOINT_XFER_ISOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) /* ISO always uses logarithmic encoding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) interval = min_t(u8, epd->bInterval, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) /* REVISIT: we actually want to use NAK limits, hinting to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) * transfer scheduling logic to try some other qh, e.g. try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * for 2 msec first:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * The downside of disabling this is that transfer scheduling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * gets VERY unfair for nonperiodic transfers; a misbehaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * peripheral could make that hurt. Constant NAKing is perfectly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * normal for reads from network or serial adapters ... which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * why we have partial NAKlimit support for bulk RX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * The upside of disabling it is simpler transfer scheduling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) qh->intv_reg = interval;
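^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * Example: a full-speed interrupt endpoint with bInterval 10 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * polled every 10 frames (linear encoding, 10 ms); a high-speed one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) * with bInterval 4 every 2^(4-1) = 8 microframes (logarithmic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) * encoding, 1 ms).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) */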
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) /* precompute addressing for external hub/tt ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (musb->is_multipoint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct usb_device *parent = urb->dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (parent != hcd->self.root_hub) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) qh->h_addr_reg = (u8) parent->devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /* set up tt info if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (urb->dev->tt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) qh->h_port_reg = (u8) urb->dev->ttport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (urb->dev->tt->hub)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) qh->h_addr_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) (u8) urb->dev->tt->hub->devnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (urb->dev->tt->multi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) qh->h_addr_reg |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) /* Invariant: hep->hcpriv is NULL, OR points to the qh that's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * already scheduled. Until we get real DMA queues (with an entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * for each urb/buffer), we only have work to do in the former case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) spin_lock_irqsave(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (hep->hcpriv || !next_urb(qh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /* some concurrent activity submitted another urb to hep...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * odd, rare, error prone, but legal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) kfree(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) qh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ret = musb_schedule(musb, qh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) urb->hcpriv = qh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /* FIXME set urb->start_frame for iso/intr, it's tested in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * musb_start_urb(), but otherwise only konicawc cares ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) spin_unlock_irqrestore(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) spin_lock_irqsave(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) usb_hcd_unlink_urb_from_ep(hcd, urb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) spin_unlock_irqrestore(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) kfree(qh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * Abort a transfer that's at the head of a hardware queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * Called with the controller locked, IRQs blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) * The hardware queue advances to the next transfer, unless prevented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct musb_hw_ep *ep = qh->hw_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) struct musb *musb = ep->musb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) void __iomem *epio = ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) unsigned hw_end = ep->epnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) void __iomem *regs = ep->musb->mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) int is_in = usb_pipein(urb->pipe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) u16 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct dma_channel *dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) musb_ep_select(regs, hw_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (is_dma_capable()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) dma = is_in ? ep->rx_channel : ep->tx_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) status = ep->musb->dma_controller->channel_abort(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) is_in ? 'R' : 'T', ep->epnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) urb, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) urb->actual_length += dma->actual_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) /* turn off DMA requests, discard state, stop polling ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (ep->epnum && is_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) /* giveback saves bulk toggle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) csr = musb_h_flush_rxfifo(ep, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) /* clear the endpoint's irq status here to avoid bogus irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (is_dma_capable() && dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) musb_platform_clear_ep_rxintr(musb, ep->epnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) } else if (ep->epnum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) musb_h_tx_flush_fifo(ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) csr = musb_readw(epio, MUSB_TXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) csr &= ~(MUSB_TXCSR_AUTOSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) | MUSB_TXCSR_DMAENAB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) | MUSB_TXCSR_H_RXSTALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) | MUSB_TXCSR_H_NAKTIMEOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) | MUSB_TXCSR_H_ERROR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) | MUSB_TXCSR_TXPKTRDY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) musb_writew(epio, MUSB_TXCSR, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /* REVISIT may need to clear FLUSHFIFO ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) musb_writew(epio, MUSB_TXCSR, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) /* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
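
/*
 * Caller-contract sketch (illustration, not driver code): this helper
 * expects musb->lock to be held and the URB to be off usbcore's books
 * already.  musb_urb_dequeue() below uses it roughly like so:
 *
 *	spin_lock_irqsave(&musb->lock, flags);
 *	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
 *	if (!ret)
 *		ret = musb_cleanup_urb(urb, qh);
 *	spin_unlock_irqrestore(&musb->lock, flags);
 *
 * On a successful abort the URB is given back and the endpoint
 * schedule advanced; a nonzero return means channel_abort() itself
 * failed.
 */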

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in = usb_pipein(urb->pipe);
	int			ret;

	trace_musb_urb_deq(musb, urb);

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
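
/*
 * Illustrative call path (orientation only): a class driver cancelling
 * I/O reaches musb_urb_dequeue() through usbcore:
 *
 *	usb_kill_urb(urb);		// or usb_unlink_urb(urb)
 *	    -> usb_hcd_unlink_urb(urb, -ENOENT)
 *	        -> hcd->driver->urb_dequeue(hcd, urb, status)
 *
 * usb_kill_urb() additionally sleeps until the URB's completion
 * handler has run, so it must not be called from atomic context.
 */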

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
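
/*
 * Call-path sketch (orientation only): usbcore invokes this hook when
 * an endpoint is shut down, e.g. on disconnect or an altsetting
 * change:
 *
 *	usb_disable_endpoint(udev, epaddr, true)
 *	    -> usb_hcd_disable_endpoint(udev, hep)
 *	        -> hcd->driver->endpoint_disable(hcd, hep)
 *
 * which is musb_h_disable() here; by this point usbcore has normally
 * flushed the endpoint's URBs, so the queue walk above is largely
 * defensive.
 */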

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}
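
/*
 * Usage sketch (not driver code): MUSB_FRAME latches the frame number
 * of the most recent SOF (11 significant bits).  Class drivers reach
 * this hook through usbcore:
 *
 *	frame = usb_get_current_frame_number(urb->dev);
 *	    -> usb_hcd_get_frame_number(udev)
 *	        -> hcd->driver->get_frame_number(hcd)
 */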

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when the (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;
	int		ret;

	ret = musb_port_suspend(musb, true);
	if (ret)
		return ret;

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->otg->state));
		return -EBUSY;
	} else
		return 0;
}
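
/*
 * Note (an assumption from the usual MUSB DEVCTL layout, kept purely
 * as illustration): MUSB_DEVCTL_VBUS is a two-bit field encoding the
 * sensed VBUS level, so comparing the masked value against the mask
 * itself tests for the top encoding, "above VBus Valid":
 *
 *	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 *	above_vbus_valid = (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS;
 *
 * Lower encodings (below SessionEnd, or between SessionEnd and AValid)
 * mean VBUS hasn't risen far enough to trust a connect.
 */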

static int musb_bus_resume(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	if (musb->config &&
	    musb->config->host_port_deassert_reset_at_resume)
		musb_port_reset(musb, false);

	return 0;
}

#ifndef CONFIG_MUSB_PIO_ONLY

#define MUSB_USB_DMA_ALIGN 4

struct musb_temp_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[];
};
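
/*
 * Layout sketch (illustration only) for the bounce buffer built by
 * musb_alloc_temp_buffer() below: the allocation is oversized by
 * MUSB_USB_DMA_ALIGN - 1 bytes and the struct is slid forward with
 * PTR_ALIGN() so data[] lands on an aligned address:
 *
 *	kmalloc_ptr       temp = PTR_ALIGN(kmalloc_ptr, 4)
 *	|                 |
 *	v                 v
 *	[ 0..3 pad bytes ][ kmalloc_ptr | old_xfer_buffer | data[] ... ]
 *
 * Both members are pointers, so sizeof(struct musb_temp_buffer) is a
 * multiple of 4 and aligning the struct also aligns data[].  Keeping
 * the original kmalloc pointer inside the struct lets
 * musb_free_temp_buffer() recover it from urb->transfer_buffer via
 * container_of().
 */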

static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		memcpy(temp->old_xfer_buffer, temp->data, length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}

static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct musb_temp_buffer such that data[] is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);

	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
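
/*
 * Worked example (hypothetical addresses): with MUSB_USB_DMA_ALIGN of
 * 4, a transfer_buffer at 0x1002 has (0x1002 & 3) == 2, so a bounce
 * buffer is set up; one at 0x1000 has (0x1000 & 3) == 0 and is used
 * directly.  OUT data is copied into data[] here, up front; IN data
 * is copied back in musb_free_temp_buffer(), once the actual transfer
 * length is known.
 */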

static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct musb	*musb = hcd_to_musb(hcd);
	int		ret;

	/*
	 * The DMA engine in RTL 1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4 byte boundary.
	 * For such engines we implement the (un)map_urb_for_dma hooks
	 * here; do not use them for RTL < 1.8.
	 */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);

	ret = musb_alloc_temp_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		musb_free_temp_buffer(urb);

	return ret;
}

static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb	*musb = hcd_to_musb(hcd);

	usb_hcd_unmap_urb_for_dma(hcd, urb);

	/* Do not use this hook for RTL < 1.8 (see description above) */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return;

	musb_free_temp_buffer(urb);
}
#endif /* !CONFIG_MUSB_PIO_ONLY */
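
/*
 * Ordering sketch (not driver code): usbcore brackets every URB
 * submission with these hooks, so a bounce buffer lives exactly as
 * long as its DMA mapping:
 *
 *	usb_hcd_submit_urb()
 *	    -> map_urb_for_dma hook		(musb_map_urb_for_dma)
 *	... transfer completes ...
 *	usb_hcd_giveback_urb()
 *	    -> unmap_urb_for_dma hook		(musb_unmap_urb_for_dma)
 */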

static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_DMA | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};

int musb_host_alloc(struct musb *musb)
{
	struct device	*dev = musb->controller;

	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
	if (!musb->hcd)
		return -EINVAL;

	*musb->hcd->hcd_priv = (unsigned long) musb;
	musb->hcd->self.uses_pio_for_control = 1;
	musb->hcd->uses_new_polling = 1;
	musb->hcd->has_tt = 1;

	return 0;
}
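
/*
 * The back-pointer stored in hcd_priv above is what hcd_to_musb()
 * reads; a minimal sketch of that accessor (the real inline lives in
 * musb_host.h):
 *
 *	static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
 *	{
 *		return *(struct musb **) hcd->hcd_priv;
 *	}
 *
 * which is why .hcd_priv_size is sizeof(struct musb *): the private
 * area holds exactly that one pointer.
 */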

void musb_host_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PERIPHERAL)
		return;
	usb_remove_hcd(musb->hcd);
}

void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}

int musb_host_setup(struct musb *musb, int power_budget)
{
	int			ret;
	struct usb_hcd		*hcd = musb->hcd;

	if (musb->port_mode == MUSB_HOST) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	}
	otg_set_host(musb->xceiv->otg, &hcd->self);
	/* don't support otg protocols */
	hcd->self.otg_port = 0;
	musb->xceiv->otg->host = &hcd->self;
	hcd->power_budget = 2 * (power_budget ? : 250);
	hcd->skip_phy_initialization = 1;

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	device_wakeup_enable(hcd->self.controller);
	return 0;
}
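
/*
 * Units note (worked example): power_budget arrives in 2 mA units,
 * like a config descriptor's bMaxPower, while hcd->power_budget is
 * counted in mA; hence the doubling above.  A platform passing 0
 * gets the 250 default, i.e. 2 * 250 = 500 mA for the root port.
 */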

void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}

void musb_host_poke_root_hub(struct musb *musb)
{
	MUSB_HST_MODE(musb);
	if (musb->hcd->status_urb)
		usb_hcd_poll_rh_status(musb->hcd);
	else
		usb_hcd_resume_root_hub(musb->hcd);
}