// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
#include "davinci.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD	64
#define NUM_RXCHAN_BD	64

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

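/* Per-channel freelist of buffer descriptors.  The BDs themselves come
 * from the controller's dma_pool (see cppi_pool_init()); these helpers
 * just push and pop them on a simple singly linked list.
 */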
static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}

/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

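/* Reset the channel's bookkeeping fields and build its BD freelist
 * from the controller's dma_pool.
 */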
static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

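/* Abort whatever the channel is doing and return all of its BDs
 * (the freelist plus any last_processed descriptor) to the dma_pool.
 */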
static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

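/* Initialize every TX and RX channel (direction flags, BD pools, state
 * RAM pointers), then enable the per-channel CPPI interrupts and the
 * TX/RX CPPI engines; RNDIS mode and host-side autorequest start out
 * disabled.
 */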
static void cppi_controller_start(struct cppi *controller)
{
	void __iomem	*tibase;
	int		i;

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
}

/*
 *  Stop DMA controller
 *
 *  De-Init the DMA controller as necessary.
 */

static void cppi_controller_stop(struct cppi *controller)
{
	void __iomem	*tibase;
	int		i;
	struct musb	*musb;

	musb = controller->controller.musb;

	tibase = controller->tibase;
	/* DISABLE INDIVIDUAL CHANNEL Interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	musb_dbg(musb, "Tearing down RX and TX Channels");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* In the TX case proper teardown is supported, so we only disable
	 * TX/RX CPPI after the TX channels have been cleaned up; TX CPPI
	 * cannot be disabled before TX teardown is complete.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}


/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;
	struct musb		*musb;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;
	musb = c->musb;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = 0x7fffffff;

	musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		musb_dbg(c->controller->controller.musb,
			"releasing idle DMA channel %p", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

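/* Dump the RX channel's CSR and state RAM words (debug aid; the "level"
 * argument is currently unused).
 */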
/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->controller.musb,
		"RX DMA%d%s: %d left, csr %04x, "
		"%08x H%08x S%08x C%08x, "
		"B%08x L%08x %08x .. %08x",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

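/* Dump the TX channel's CSR and state RAM words (debug aid; "level" is
 * likewise unused).
 */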
/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->controller.musb,
		"TX DMA%d%s: csr %04x, "
		"H%08x S%08x C%08x %08x, "
		"F%08x L%08x .. %08x",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

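/* Switch the channel's RNDIS mode bit in DAVINCI_RNDIS_REG on or off,
 * caching the current setting so redundant register writes are skipped.
 */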
/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}

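/* Dump one RX buffer descriptor; cppi_dump_rxq() below walks the last
 * processed BD plus the whole active queue.
 */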
static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
	struct cppi_descriptor	*bd;

	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
}


/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef	RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}


/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model.
 * ... the goal will be to append to the
 * existing queue, processing completed "dma buffers" (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave too
 * (except how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		if (length)
			n_bds = DIV_ROUND_UP(length, maxpacket);
		else
			n_bds = 1;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}

	musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			(unsigned long long)addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32		partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the
 *   next buffer queued will NOT get its 300 bytes of data.  (It seems like
 *   SOP/EOP are intended as outputs for RX queues, not inputs ...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases correctly
 *   and at the significant penalty of needing an IRQ per packet.  (The lack of
 *   I/O overlap can be slightly ameliorated by enabling double buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */


/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a) peripheral mode ... since rndis peripherals could pad their
 *      writes to hosts, causing i/o failure; or we'd have to cope with
 *      a largely unknowable variety of host side protocol variants
 *  (b) and short reads are NOT errors ... since full reads would
 *      cause those same i/o failures
 *  (c) and read length is
 *      - less than 64KB (max per cppi descriptor)
 *      - not a multiple of 4096 (g_zero default, full reads typical)
 *      - N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, the heuristic will probably need to be disabled.
 */
static bool cppi_rx_rndis = true;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * cppi_next_rx_segment - dma read for the next chunk of a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * @musb: the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) * @rx: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * @onepacket: true unless caller treats short reads as errors, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * performs fault recovery above usbcore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * Context: controller irqlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * See above notes about why we can't use multi-BD RX queues except in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * code to recover from corrupted datastreams after each short transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned maxpacket = rx->maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) dma_addr_t addr = rx->buf_dma + rx->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) size_t length = rx->buf_len - rx->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct cppi_descriptor *bd, *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) unsigned n_bds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) void __iomem *tibase = musb->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int is_rndis = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct cppi_descriptor *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (onepacket) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* almost every USB driver, host or peripheral side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) n_bds = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /* maybe apply the heuristic above */
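		/* these tests implement the length criteria in (c) above:
		 *   length > maxpacket              ... more than one packet
		 *   (length & ~0xffff) == 0         ... under 64KB, fits one BD
		 *   (length & 0x0fff) != 0          ... not a multiple of 4096
		 *   (length & (maxpacket - 1)) == 0 ... whole packets (maxpacket
		 *                                       is a power of two)
		 */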
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (cppi_rx_rndis
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) && is_peripheral_active(musb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) && length > maxpacket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) && (length & ~0xffff) == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) && (length & 0x0fff) != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) && (length & (maxpacket - 1)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) maxpacket = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) is_rndis = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* virtually nothing except mass storage class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (length > 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) n_bds = 0xffff / maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) length = n_bds * maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) n_bds = DIV_ROUND_UP(length, maxpacket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (n_bds == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) onepacket = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
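	/* Worked example, assuming maxpacket = 512: a request longer than
	 * 64KB is trimmed to 0xffff / 512 = 127 packets (65024 bytes), then
	 * capped at NUM_RXCHAN_BD descriptors; the remainder is picked up by
	 * a later segment once this one completes.
	 */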
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* In host mode, autorequest logic can generate some IN tokens; it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * tricky since we can't leave REQPKT set in RXCSR after the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * finishes. So: multipacket transfers involve two or more segments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * And always at least two IRQs ... RNDIS mode is not an option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (is_host_active(musb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) length = min(n_bds * maxpacket, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) "dma 0x%llx len %u %u/%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) rx->index, maxpacket,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) onepacket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ? (is_rndis ? "rndis" : "onepacket")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) : "multipacket",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) n_bds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) musb_readl(tibase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) & 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) (unsigned long long)addr, length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) rx->channel.actual_len, rx->buf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* only queue one segment at a time, since the hardware prevents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * correct queue shutdown after unexpected short packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) bd = cppi_bd_alloc(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rx->head = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Build BDs for all packets in this segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) u32 bd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) bd = cppi_bd_alloc(rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (!bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) tail->next = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) tail->hw_next = bd->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) bd->hw_next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* all but the last packet will be maxpacket size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (maxpacket < length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bd_len = maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) bd_len = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) bd->hw_bufp = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) addr += bd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) rx->offset += bd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) bd->buflen = bd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
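		/* the first (SOP) descriptor's options word also carries the
		 * full segment length; completion handling later reads the
		 * actual received length back from the SOP descriptor
		 * (see cppi_rx_scan)
		 */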
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) length -= bd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* we always expect at least one reusable BD! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (!tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) } else if (i < n_bds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) tail->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) tail->hw_next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) bd = rx->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) rx->tail = tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* short reads and other faults should terminate this entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * dma segment. we want one "dma packet" per dma segment, not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * one per USB packet, terminating the whole queue at once...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * NOTE that current hardware seems to ignore SOP and EOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) bd->hw_options |= CPPI_SOP_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) tail->hw_options |= CPPI_EOP_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) for (d = rx->head; d; d = d->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) cppi_dump_rxbd("S", d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* in case the preceding transfer left some state... */
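	/* (splice the unretired tail of the previous queue onto this new
	 * head, both in the driver's list and via the hardware next pointer)
	 */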
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) tail = rx->last_processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (tail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) tail->next = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) tail->hw_next = bd->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) core_rxirq_enable(tibase, rx->index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* BDs live in DMA-coherent memory, but writes might be pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) cpu_drain_writebuffer();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* REVISIT specs say to write this AFTER the BUFCNT register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * below ... but that loses badly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) musb_writel(&rx_ram->rx_head, 0, bd->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* bufferCount must be at least 3, and zeroes on completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * unless it underflows below zero, or stops at two, or keeps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * growing ... grr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) */
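	/* The writes below appear to add to the current count rather than
	 * replace it, so BUFCNT is topped up until it covers n_bds plus some
	 * slack; that is an inference from this arithmetic, not a documented
	 * guarantee.
	 */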
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) i = musb_readl(tibase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) musb_writel(tibase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) n_bds + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) else if (n_bds > (i - 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) musb_writel(tibase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) n_bds - (i - 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) i = musb_readl(tibase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (i < (2 + n_bds)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) rx->index, i, n_bds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) musb_writel(tibase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) n_bds + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cppi_dump_rx(4, rx, "/S");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * cppi_channel_program - program channel for data transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * @ch: the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * @maxpacket: max packet size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * @mode: For RX, 1 unless the usb protocol driver promised to treat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * all short reads as errors and kick in high level fault recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * For TX, ignored because of RNDIS mode races/glitches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * @dma_addr: dma address of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * @len: length of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * Context: controller irqlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int cppi_channel_program(struct dma_channel *ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) u16 maxpacket, u8 mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dma_addr_t dma_addr, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct cppi_channel *cppi_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct cppi *controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct musb *musb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) cppi_ch = container_of(ch, struct cppi_channel, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) controller = cppi_ch->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) musb = controller->controller.musb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) switch (ch->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) case MUSB_DMA_STATUS_BUS_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case MUSB_DMA_STATUS_CORE_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* fault irq handler should have handled cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) WARNING("%cX DMA%d not cleaned up after abort!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) cppi_ch->transmit ? 'T' : 'R',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cppi_ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* WARN_ON(1); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) case MUSB_DMA_STATUS_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) WARNING("program active channel? %cX DMA%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) cppi_ch->transmit ? 'T' : 'R',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) cppi_ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* WARN_ON(1); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case MUSB_DMA_STATUS_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) musb_dbg(musb, "%cX DMA%d not allocated!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) cppi_ch->transmit ? 'T' : 'R',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) cppi_ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) case MUSB_DMA_STATUS_FREE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ch->status = MUSB_DMA_STATUS_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* set transfer parameters, then queue up its first segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) cppi_ch->buf_dma = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) cppi_ch->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) cppi_ch->maxpacket = maxpacket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) cppi_ch->buf_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) cppi_ch->channel.actual_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* TX channel? or RX? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (cppi_ch->transmit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) cppi_next_tx_segment(musb, cppi_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) cppi_next_rx_segment(musb, cppi_ch, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct cppi_channel *rx = &cppi->rx[ch];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct cppi_rx_stateram __iomem *state = rx->state_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct cppi_descriptor *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct cppi_descriptor *last = rx->last_processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) bool completed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) bool acked = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) dma_addr_t safe2ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) void __iomem *regs = rx->hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct musb *musb = cppi->controller.musb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) cppi_dump_rx(6, rx, "/K");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bd = last ? last->next : rx->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* run through all completed BDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) i++, bd = bd->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) u16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* catch latest BD writes from CPPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (!completed && (bd->hw_options & CPPI_OWN_SET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) "off.len %08x opt.len %08x (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) bd->hw_off_len, bd->hw_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) rx->channel.actual_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* actual packet received length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if ((bd->hw_options & CPPI_SOP_SET) && !completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (bd->hw_options & CPPI_EOQ_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) completed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!completed && len < bd->buflen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* NOTE: when we get a short packet, RXCSR_H_REQPKT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * must have been cleared, and no more DMA packets may
			 * be active in the queue... TI docs didn't say, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * CPPI ignores those BDs even though OWN is still set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) completed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) musb_dbg(musb, "rx short %d/%d (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) len, bd->buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) rx->channel.actual_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * RACE: we can notice OWN cleared before CPPI raises the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * matching irq by writing that BD as the completion pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * In such cases, stop scanning and wait for the irq, avoiding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * lost acks and states where BD ownership is unclear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
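		/* acknowledge by writing this BD's own address back to the
		 * completion register; if re-reading still returns the same
		 * address, nothing newer has completed yet
		 */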
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (bd->dma == safe2ack) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) musb_writel(&state->rx_complete, 0, safe2ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) safe2ack = musb_readl(&state->rx_complete, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) acked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (bd->dma == safe2ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) safe2ack = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) rx->channel.actual_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) cppi_bd_free(rx, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) last = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* stop scanning on end-of-segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (bd->hw_next == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) completed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) rx->last_processed = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* dma abort, lost ack, or ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!acked && last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) musb_writel(&state->rx_complete, 0, safe2ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (safe2ack == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) cppi_bd_free(rx, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) rx->last_processed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /* if we land here on the host side, H_REQPKT will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * be clear and we need to restart the queue...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) WARN_ON(rx->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) musb_ep_select(cppi->mregs, rx->index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) csr = musb_readw(regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (csr & MUSB_RXCSR_DMAENAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) rx->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) rx->head, rx->tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) rx->last_processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) ? (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) rx->last_processed->dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) completed ? ", completed" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) cppi_dump_rxq(4, "/what?", rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (!completed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) int csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) rx->head = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racy, but seems to work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (is_host_active(cppi->controller.musb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) && bd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) && !(csr & MUSB_RXCSR_H_REQPKT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) csr |= MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) musb_writew(regs, MUSB_RXCSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) MUSB_RXCSR_H_WZC_BITS | csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) rx->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) rx->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) irqreturn_t cppi_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct musb *musb = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct cppi *cppi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) void __iomem *tibase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct musb_hw_ep *hw_ep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) u32 rx, tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) int i, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) cppi = container_of(musb->dma_controller, struct cppi, controller);
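	/* when this DMA block has its own IRQ line, take musb->lock here;
	 * otherwise the caller (the glue layer's USB interrupt handler) is
	 * assumed to hold it already
	 */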
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (cppi->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) spin_lock_irqsave(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) tibase = musb->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (!tx && !rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) if (cppi->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) spin_unlock_irqrestore(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* process TX channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) for (index = 0; tx; tx = tx >> 1, index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct cppi_channel *tx_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct cppi_tx_stateram __iomem *tx_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) bool completed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct cppi_descriptor *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (!(tx & 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) tx_ch = cppi->tx + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) tx_ram = tx_ch->state_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* FIXME need a cppi_tx_scan() routine, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * can also be called from abort code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) cppi_dump_tx(5, tx_ch, "/E");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) bd = tx_ch->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /*
		 * If the head is NULL, it could mean that an abort interrupt
		 * needs to be acknowledged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (NULL == bd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) musb_dbg(musb, "null BD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) musb_writel(&tx_ram->tx_complete, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /* run through all completed BDs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) i++, bd = bd->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) u16 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) /* catch latest BD writes from CPPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (bd->hw_options & CPPI_OWN_SET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) bd, bd->hw_next, bd->hw_bufp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) bd->hw_off_len, bd->hw_options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) tx_ch->channel.actual_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) tx_ch->last_processed = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* write completion register to acknowledge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * processing of completed BDs, and possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * release the IRQ; EOQ might not be set ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * REVISIT use the same ack strategy as rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * REVISIT have observed bit 18 set; huh??
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) musb_writel(&tx_ram->tx_complete, 0, bd->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /* stop scanning on end-of-segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (bd->hw_next == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) completed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* on end of segment, maybe go to next one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (completed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* cppi_dump_tx(4, tx_ch, "/complete"); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) /* transfer more, or report completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (tx_ch->offset >= tx_ch->buf_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) tx_ch->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) tx_ch->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) hw_ep = tx_ch->hw_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) musb_dma_completion(musb, index + 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* Bigger transfer than we could fit in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * that first batch of descriptors...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) cppi_next_tx_segment(musb, tx_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) tx_ch->head = bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* Start processing the RX block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) for (index = 0; rx; rx = rx >> 1, index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (rx & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct cppi_channel *rx_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) rx_ch = cppi->rx + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /* let incomplete dma segments finish */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (!cppi_rx_scan(cppi, index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
			/* start another dma segment if needed: everything queued
			 * so far was received (actual_len == offset), but the
			 * whole buffer is not yet complete
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (rx_ch->channel.actual_len != rx_ch->buf_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) && rx_ch->channel.actual_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) == rx_ch->offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) cppi_next_rx_segment(musb, rx_ch, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* all segments completed! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) hw_ep = rx_ch->hw_ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) core_rxirq_disable(tibase, index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) musb_dma_completion(musb, index + 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* write to CPPI EOI register to re-enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (cppi->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) spin_unlock_irqrestore(&musb->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) EXPORT_SYMBOL_GPL(cppi_interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* Instantiate a software object representing a DMA controller. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct dma_controller *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct cppi *controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct device *dev = musb->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int irq = platform_get_irq_byname(pdev, "dma");
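	/* the dedicated "dma" IRQ is optional; when it is absent, the glue
	 * layer is expected to call cppi_interrupt() from its own USB
	 * interrupt handler
	 */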
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) controller = kzalloc(sizeof *controller, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (!controller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) controller->mregs = mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) controller->tibase = mregs - DAVINCI_BASE_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) controller->controller.musb = musb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) controller->controller.channel_alloc = cppi_channel_allocate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) controller->controller.channel_release = cppi_channel_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) controller->controller.channel_program = cppi_channel_program;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) controller->controller.channel_abort = cppi_channel_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* NOTE: allocating from on-chip SRAM would give the least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * contention for memory access, if that ever matters here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /* setup BufferPool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) controller->pool = dma_pool_create("cppi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) controller->controller.musb->controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) sizeof(struct cppi_descriptor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) CPPI_DESCRIPTOR_ALIGN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (!controller->pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) kfree(controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (irq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) dev_err(dev, "request_irq %d failed!\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) musb_dma_controller_destroy(&controller->controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) controller->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) cppi_controller_start(controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return &controller->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) EXPORT_SYMBOL_GPL(cppi_dma_controller_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Destroy a previously-instantiated DMA controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) void cppi_dma_controller_destroy(struct dma_controller *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct cppi *cppi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) cppi = container_of(c, struct cppi, controller);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) cppi_controller_stop(cppi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (cppi->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) free_irq(cppi->irq, cppi->controller.musb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /* assert: caller stopped the controller first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) dma_pool_destroy(cppi->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) kfree(cppi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) EXPORT_SYMBOL_GPL(cppi_dma_controller_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * Context: controller irqlocked, endpoint selected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static int cppi_channel_abort(struct dma_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct cppi_channel *cppi_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct cppi *controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) void __iomem *mbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) void __iomem *tibase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) void __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct cppi_descriptor *queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) cppi_ch = container_of(channel, struct cppi_channel, channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) controller = cppi_ch->controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) switch (channel->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) case MUSB_DMA_STATUS_BUS_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) case MUSB_DMA_STATUS_CORE_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* from RX or TX fault irq handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) case MUSB_DMA_STATUS_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /* the hardware needs shutting down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) regs = cppi_ch->hw_ep->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) case MUSB_DMA_STATUS_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case MUSB_DMA_STATUS_FREE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (!cppi_ch->transmit && cppi_ch->head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) cppi_dump_rxq(3, "/abort", cppi_ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) mbase = controller->mregs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) tibase = controller->tibase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) queue = cppi_ch->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) cppi_ch->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) cppi_ch->tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* REVISIT should rely on caller having done this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * and caller should rely on us not changing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * peripheral code is safe ... check host too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) musb_ep_select(mbase, cppi_ch->index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (cppi_ch->transmit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct cppi_tx_stateram __iomem *tx_ram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* REVISIT put timeouts on these controller handshakes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) cppi_dump_tx(6, cppi_ch, " (teardown)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* teardown DMA engine then usb core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) } while (!(value & CPPI_TEAR_READY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) tx_ram = cppi_ch->state_ram;
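		/* wait for the engine to post its teardown-complete marker
		 * (observed as 0xFFFFFFFC) in the TX completion pointer
		 */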
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) value = musb_readl(&tx_ram->tx_complete, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) } while (0xFFFFFFFC != value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* FIXME clean up the transfer state ... here?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * the completion routine should get called with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * an appropriate status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
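		/* disable the DMA request and flush the FIFO; TXCSR is written
		 * twice, presumably to cover a double-buffered endpoint FIFO
		 */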
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) value = musb_readw(regs, MUSB_TXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) value &= ~MUSB_TXCSR_DMAENAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) value |= MUSB_TXCSR_FLUSHFIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) musb_writew(regs, MUSB_TXCSR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) musb_writew(regs, MUSB_TXCSR, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /*
		 * 1. Write value 0x1 (bit 0 set) to the completion pointer
		 *    (write back mode).
		 * 2. Wait for the abort interrupt, then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) cppi_reset_tx(tx_ram, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) cppi_ch->head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) musb_writel(&tx_ram->tx_complete, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) cppi_dump_tx(5, cppi_ch, " (done teardown)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* REVISIT tx side _should_ clean up the same way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) * as the RX side ... this does no cleanup at all!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) } else /* RX */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) u16 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* NOTE: docs don't guarantee any of this works ... we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * expect that if the usb core stops telling the cppi core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * to pull more data from it, then it'll be safe to flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * current RX DMA state iff any pending fifo transfer is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) core_rxirq_disable(tibase, cppi_ch->index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* for host, ensure ReqPkt is never set again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (is_host_active(cppi_ch->controller->controller.musb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) value &= ~((0x3) << (cppi_ch->index * 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) csr = musb_readw(regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /* for host, clear (just) ReqPkt at end of current packet(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (is_host_active(cppi_ch->controller->controller.musb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) csr |= MUSB_RXCSR_H_WZC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) csr &= ~MUSB_RXCSR_H_REQPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) csr |= MUSB_RXCSR_P_WZC_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) /* clear dma enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) csr &= ~(MUSB_RXCSR_DMAENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) musb_writew(regs, MUSB_RXCSR, csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) csr = musb_readw(regs, MUSB_RXCSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Quiesce: wait for current dma to finish (if not cleanup).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * We can't use bit zero of stateram->rx_sop, since that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * refers to an entire "DMA packet" not just emptying the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * current fifo. Most segments need multiple usb packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (channel->status == MUSB_DMA_STATUS_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /* scan the current list, reporting any data that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * transferred and acking any IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) cppi_rx_scan(controller, cppi_ch->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* clobber the existing state once it's idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * NOTE: arguably, we should also wait for all the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * RX channels to quiesce (how??) and then temporarily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * disable RXCPPI_CTRL_REG ... but it seems that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * rely on the controller restarting from state ram, with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * only RXCPPI_BUFCNT state being bogus. BUFCNT will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * correct itself after the next DMA transfer though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * REVISIT does using rndis mode change that?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) cppi_reset_rx(cppi_ch->state_ram);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /* next DMA request _should_ load cppi head ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* ... we don't "free" that list, only mutate it in place. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) cppi_dump_rx(5, cppi_ch, " (done abort)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* clean up previously pending bds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) cppi_bd_free(cppi_ch, cppi_ch->last_processed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) cppi_ch->last_processed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) while (queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct cppi_descriptor *tmp = queue->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) cppi_bd_free(cppi_ch, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) queue = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) channel->status = MUSB_DMA_STATUS_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) cppi_ch->buf_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) cppi_ch->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) cppi_ch->buf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) cppi_ch->maxpacket = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /* TBD Queries:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * Power Management ... probably turn off cppi during suspend, restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * check state ram? Clocking is presumably shared with usb core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) */