// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

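/*
 * Generic RNDIS size registers: one 32-bit register per endpoint,
 * starting at offset 0x80 for EP1 (RNDIS_REG(2) is 0x84, for example).
 */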
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS 15

#define DA8XX_USB_MODE		0x10
#define DA8XX_USB_AUTOREQ	0x14
#define DA8XX_USB_TEARDOWN	0x1c

#define DA8XX_DMA_NUM_CHANNELS 4

struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel *rx_channel;
	struct cppi41_dma_channel *tx_channel;
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;

	u32 tdown_reg;
	u32 autoreq_reg;

	void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
			     unsigned int mode);
	u8 num_channels;
};

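/*
 * AM335x Advisory 1.0.13 workaround: snapshot the host-side RX data
 * toggle before a DMA transfer (save_rx_toggle) so update_rx_toggle()
 * can detect and repair a spurious DATA1 -> DATA0 reset afterwards.
 */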
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->controller.musb))
		return;

	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
	 * the data toggle may reset from DATA1 to DATA0 while receiving data
	 * from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(musb, "Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}

static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}

static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result);

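/*
 * Complete the current transfer or, because Advisory 1.0.13 limits RX
 * transfers to one packet at a time, reprogram the DMA engine for the
 * next packet and re-arm REQPKT until the full request is satisfied.
 */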
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * Transmit a ZLP in PIO mode for transfers whose size is a
		 * multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback_result = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}

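/*
 * hrtimer callback for the early-TX-interrupt workaround: complete any
 * queued TX channels whose FIFO has drained, and re-arm the timer in
 * 20us steps while channels are still pending.
 */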
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->controller.musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
				 tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	controller = cppi41_channel->controller;
	if (controller->controller.dma_callback)
		controller->controller.dma_callback(&controller->controller);

	if (result->result == DMA_TRANS_ABORTED)
		return;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			    &txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
	    transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx) {
		u8 type;

		if (is_host_active(musb))
			type = hw_ep->out_qh->type;
		else
			type = hw_ep->ep_in.type;

		if (type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Don't use the early-TX-interrupt workaround below
			 * for isochronous transfers. Since they are periodic,
			 * by the time the next transfer is scheduled, the
			 * current one should be done already.
			 *
			 * This avoids audio playback underrun issues.
			 */
			empty = true;
		else
			empty = musb_is_tx_fifo_empty(hw_ep);
	}

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires too
	 * early, i.e. the TXFIFO is not yet empty but the DMA engine says
	 * that it is done with the transfer. We don't receive a FIFO-empty
	 * interrupt, so the only thing we can do is poll for the bit. On HS
	 * it usually takes 2us, on FS around 110us - 150us depending on the
	 * transfer size. We spin on HS (no longer than 25us) and set up a
	 * timer on FS to check for the bit and complete the transfer.
	 */
	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				       usecs * NSEC_PER_USEC,
				       20 * NSEC_PER_USEC,
				       HRTIMER_MODE_REL);
	}

out:
	spin_unlock_irqrestore(&musb->lock, flags);
}

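/*
 * The TX/RX mode registers hold a 2-bit mode field per endpoint: EP1 in
 * bits 1:0, EP2 in bits 3:2, and so on. For example,
 * update_ep_mode(2, EP_MODE_DMA_GEN_RNDIS, 0) returns 0xc.
 */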
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
	}
}

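/*
 * On DA8xx a single mode register covers both directions: 4 bits per
 * channel, TX in the low 16 bits and RX in the upper 16 bits. The
 * combined value is cached in tx_mode for both TX and RX channels.
 */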
static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned int mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	unsigned int shift;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->tx_mode;
	port = cppi41_channel->port_num;

	shift = (port - 1) * 4;
	if (!cppi41_channel->is_tx)
		shift += 16;
	new_mode = old_mode & ~(3 << shift);
	new_mode |= mode << shift;

	if (new_mode == old_mode)
		return;
	controller->tx_mode = new_mode;
	musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->controller.musb->ctrl_base,
		    controller->autoreq_reg, new_mode);
}

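/*
 * Program one transfer. TX channels use generic RNDIS mode so that
 * multi-packet transfers complete in one DMA operation; RX channels fall
 * back to transparent mode and are limited to one packet per programmed
 * transfer (AM335x Advisory 1.0.13), with cppi41_trans_done() reloading
 * the remainder packet by packet.
 */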
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->controller.musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
	 * than the max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			controller->set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			controller->set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		controller->set_dma_mode(cppi41_channel,
				EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback_result = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}

static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= controller->num_channels)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}

static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->controller.musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}

static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}

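/*
 * Abort sequence, roughly: clear DMAENAB (and REQPKT for RX), give the
 * CPPI pipeline time to drain, then repeatedly write the teardown bit
 * (TX only) and call dmaengine_terminate_all() until it stops returning
 * -EAGAIN, flushing any packet left in the FIFO afterwards.
 */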
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
	if (musb->ops->quirks & MUSB_DA8XX)
		mdelay(250);

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, controller->tdown_reg,
				    tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < ctrl->num_channels; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}

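/*
 * Channels are looked up via the glue layer's "dma-names" DT property.
 * Illustrative only; an AM335x-style node might carry something like:
 *
 *	dma-names = "tx1", "tx2", "rx1", "rx2";
 *
 * where the "tx"/"rx" prefix selects the direction and the trailing
 * number is the 1-based port (endpoint) number parsed below.
 */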
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->controller.musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			/* don't return the stale 0 from the string read */
			ret = -EINVAL;
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > controller->num_channels || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_chan(dev->parent, str);
		if (IS_ERR(dc)) {
			ret = PTR_ERR(dc);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to request %s: %d.\n",
					str, ret);
			goto err;
		}

		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller->rx_channel);
	kfree(controller->tx_channel);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int channel_size;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;
	controller->controller.musb = musb;

	if (musb->ops->quirks & MUSB_DA8XX) {
		controller->tdown_reg = DA8XX_USB_TEARDOWN;
		controller->autoreq_reg = DA8XX_USB_AUTOREQ;
		controller->set_dma_mode = da8xx_set_dma_mode;
		controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
	} else {
		controller->tdown_reg = USB_TDOWN;
		controller->autoreq_reg = USB_CTRL_AUTOREQ;
		controller->set_dma_mode = cppi41_set_dma_mode;
		controller->num_channels = MUSB_DMA_NUM_CHANNELS;
	}

	channel_size = controller->num_channels *
			sizeof(struct cppi41_dma_channel);
	controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
	if (!controller->rx_channel)
		goto rx_channel_alloc_fail;
	controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
	if (!controller->tx_channel)
		goto tx_channel_alloc_fail;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller->tx_channel);
tx_channel_alloc_fail:
	kfree(controller->rx_channel);
rx_channel_alloc_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
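/*
 * A minimal sketch (assuming the musb_platform_ops layout below) of how
 * a glue driver is expected to hook these entry points up; in-tree users
 * such as musb_dsps do roughly:
 *
 *	static const struct musb_platform_ops dsps_ops = {
 *		...
 *		.dma_init	= cppi41_dma_controller_create,
 *		.dma_exit	= cppi41_dma_controller_destroy,
 *		...
 *	};
 *
 * The musb core then calls dma_init() during probe and dma_exit() on
 * remove.
 */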