^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Renesas USB DMA Controller Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2015 Renesas Electronics Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * based on rcar-dmac.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2014 Renesas Electronics Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "../dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "../virt-dma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/**
 * struct usb_dmac_sg - Descriptor for one hardware transfer chunk
 * @mem_addr: DMA address of the memory-side buffer for this chunk
 * @size: transfer size in bytes (programmed into USB_DMATCR in
 *	USB_DMAC_XFER_SIZE units, with the remainder encoded via USB_DMATEND)
 */
struct usb_dmac_sg {
	dma_addr_t mem_addr;
	u32 size;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
/**
 * struct usb_dmac_desc - USB DMA Transfer Descriptor
 * @vd: base virtual channel DMA transaction descriptor
 * @direction: direction of the DMA transfer (DMA_DEV_TO_MEM or MEM_TO_DEV)
 * @sg_allocated_len: number of @sg entries this allocation can hold
 * @sg_len: number of @sg entries actually used by the current transfer
 * @sg_index: index of the @sg entry currently being transferred
 * @residue: residue after the DMAC completed a transfer
 * @node: node for desc_got and desc_freed lists on the owning channel
 * @done_cookie: cookie recorded when the DMAC completed the transfer
 * @sg: flexible array with per-chunk transfer information
 */
struct usb_dmac_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction direction;
	unsigned int sg_allocated_len;
	unsigned int sg_len;
	unsigned int sg_index;
	u32 residue;
	struct list_head node;
	dma_cookie_t done_cookie;
	struct usb_dmac_sg sg[];
};

#define to_usb_dmac_desc(vd)	container_of(vd, struct usb_dmac_desc, vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
/**
 * struct usb_dmac_chan - USB DMA Controller Channel
 * @vc: base virtual DMA channel object
 * @iomem: channel I/O memory base (per-channel register window)
 * @index: index of this channel in the controller
 * @irq: irq number of this channel
 * @desc: the descriptor currently being transferred, NULL when idle
 * @descs_allocated: number of descriptors allocated for this channel
 * @desc_got: descriptors handed out by usb_dmac_desc_get()
 * @desc_freed: descriptors available for reuse after a completed transfer
 */
struct usb_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *iomem;
	unsigned int index;
	int irq;
	struct usb_dmac_desc *desc;
	int descs_allocated;
	struct list_head desc_got;
	struct list_head desc_freed;
};

#define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
/**
 * struct usb_dmac - USB DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base (global register window)
 * @n_channels: number of available channels
 * @channels: array of @n_channels DMAC channels
 */
struct usb_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct usb_dmac_chan *channels;
};

#define to_usb_dmac(d)	container_of(d, struct usb_dmac, engine)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
/* -----------------------------------------------------------------------------
 * Registers
 */

/* Each channel owns a 0x20-byte register window, starting at offset 0x20. */
#define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i))

/*
 * Global registers, relative to the controller base (accessed through
 * usb_dmac_read()/usb_dmac_write()).
 */
#define USB_DMASWR 0x0008
#define USB_DMASWR_SWR (1 << 0)	/* software reset */
#define USB_DMAOR 0x0060
#define USB_DMAOR_AE (1 << 1)	/* address error flag */
#define USB_DMAOR_DME (1 << 0)	/* global DMA master enable */

/*
 * Per-channel registers, relative to the channel window (accessed through
 * usb_dmac_chan_read()/usb_dmac_chan_write()).  Note USB_DMATCR (0x0008) is
 * in the channel space, distinct from the global USB_DMASWR (also 0x0008).
 */
#define USB_DMASAR 0x0000	/* source address */
#define USB_DMADAR 0x0004	/* destination address */
#define USB_DMATCR 0x0008	/* transfer count, in USB_DMAC_XFER_SIZE units */
#define USB_DMATCR_MASK 0x00ffffff
#define USB_DMACHCR 0x0014	/* channel control */
#define USB_DMACHCR_FTE (1 << 24)
#define USB_DMACHCR_NULLE (1 << 16)
#define USB_DMACHCR_NULL (1 << 12)
#define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6))
#define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6))
#define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6))
#define USB_DMACHCR_IE (1 << 5)	/* interrupt enable */
#define USB_DMACHCR_SP (1 << 2)	/* stopped in progress */
#define USB_DMACHCR_TE (1 << 1)	/* transfer end */
#define USB_DMACHCR_DE (1 << 0)	/* channel enable */
#define USB_DMATEND 0x0018	/* final-transaction valid-byte enable mask */

/* Hardcode the xfer_shift to 5 (32bytes) */
#define USB_DMAC_XFER_SHIFT 5
#define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT)
#define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B
#define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES

/* for descriptors */
#define USB_DMAC_INITIAL_NR_DESC 16
#define USB_DMAC_INITIAL_NR_SG 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * Device access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) writel(data, dmac->iomem + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) return readl(dmac->iomem + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return readl(chan->iomem + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) writel(data, chan->iomem + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * Initialization and configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static u32 usb_dmac_calc_tend(u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * Please refer to the Figure "Example of Final Transaction Valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * Data Transfer Enable (EDTEN) Setting" in the data sheet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) USB_DMAC_XFER_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* This function is already held by vc.lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) struct usb_dmac_desc *desc = chan->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct usb_dmac_sg *sg = desc->sg + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) dma_addr_t src_addr = 0, dst_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (desc->direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) dst_addr = sg->mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) src_addr = sg->mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) dev_dbg(chan->vc.chan.device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) "chan%u: queue sg %p: %u@%pad -> %pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) chan->index, sg, sg->size, &src_addr, &dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) usb_dmac_chan_write(chan, USB_DMATCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /* This function is already held by vc.lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) struct virt_dma_desc *vd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) vd = vchan_next_desc(&chan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (!vd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) chan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * Remove this request from vc->desc_issued. Otherwise, this driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * will get the previous value from vchan_next_desc() after a transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * was completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) list_del(&vd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) chan->desc = to_usb_dmac_desc(vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) chan->desc->sg_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) usb_dmac_chan_start_sg(chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) static int usb_dmac_init(struct usb_dmac *dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) u16 dmaor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) /* Clear all channels and enable the DMAC globally. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) dmaor = usb_dmac_read(dmac, USB_DMAOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) dev_warn(dmac->dev, "DMAOR initialization failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) * Descriptors allocation and free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) struct usb_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) desc->sg_allocated_len = sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) INIT_LIST_HEAD(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) spin_lock_irqsave(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) list_add_tail(&desc->node, &chan->desc_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) struct usb_dmac_desc *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) list_splice_init(&chan->desc_freed, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) list_splice_init(&chan->desc_got, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) list_for_each_entry_safe(desc, _desc, &list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) chan->descs_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) unsigned int sg_len, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) struct usb_dmac_desc *desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /* Get a freed descritpor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) spin_lock_irqsave(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) list_for_each_entry(desc, &chan->desc_freed, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (sg_len <= desc->sg_allocated_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) list_move_tail(&desc->node, &chan->desc_got);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) /* Allocate a new descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) /* If allocated the desc, it was added to tail of the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) spin_lock_irqsave(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) list_move_tail(&desc->node, &chan->desc_got);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
/* Return a descriptor to the channel's freed list for later reuse. */
static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
			      struct usb_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_move_tail(&desc->node, &chan->desc_freed);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * Stop and reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) struct dma_chan *chan = &uchan->vc.chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct usb_dmac *dmac = to_usb_dmac(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) /* Don't issue soft reset if any one of channels is busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) for (i = 0; i < dmac->n_channels; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (usb_dmac_chan_is_busy(uchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) usb_dmac_write(dmac, USB_DMAOR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) usb_dmac_write(dmac, USB_DMASWR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) usb_dmac_write(dmac, USB_DMAOR, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) usb_dmac_soft_reset(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
/* Disable the whole controller by clearing DMAOR (drops the DME bit). */
static void usb_dmac_stop(struct usb_dmac *dmac)
{
	usb_dmac_write(dmac, USB_DMAOR, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * DMA engine operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) usb_dmac_desc_free(uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) uchan->descs_allocated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return pm_runtime_get_sync(chan->device->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) static void usb_dmac_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /* Protect against ISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) spin_lock_irqsave(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) usb_dmac_chan_halt(uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) spin_unlock_irqrestore(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) usb_dmac_desc_free(uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) vchan_free_chan_resources(&uchan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) pm_runtime_put(chan->device->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) unsigned int sg_len, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) unsigned long dma_flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) struct usb_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) if (!sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) dev_warn(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) "%s: bad parameter: len=%d\n", __func__, sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) desc->direction = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) desc->sg_len = sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) desc->sg[i].mem_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) desc->sg[i].size = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct usb_dmac_desc *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) spin_lock_irqsave(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) usb_dmac_chan_halt(uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) vchan_get_all_descriptors(&uchan->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) if (uchan->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) uchan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) list_splice_init(&uchan->desc_got, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) list_for_each_entry_safe(desc, _desc, &list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) list_move_tail(&desc->node, &uchan->desc_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) spin_unlock_irqrestore(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) vchan_dma_desc_free_list(&uchan->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) struct usb_dmac_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) int sg_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) struct usb_dmac_sg *sg = desc->sg + sg_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) u32 mem_addr = sg->mem_addr & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) unsigned int residue = sg->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * We cannot use USB_DMATCR to calculate residue because USB_DMATCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * has unsuited value to calculate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) if (desc->direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) dma_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct usb_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) u32 residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (desc->done_cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) residue = desc->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) dma_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) u32 residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) struct virt_dma_desc *vd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) struct usb_dmac_desc *desc = chan->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) vd = vchan_find_desc(&chan->vc, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) if (!vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) desc = to_usb_dmac_desc(vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) /* Compute the size of all usb_dmac_sg still to be transferred */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) for (i = desc->sg_index + 1; i < desc->sg_len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) residue += desc->sg[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /* Add the residue for the current sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) unsigned int residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) status = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /* a client driver will get residue after DMA_COMPLETE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) if (!txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) spin_lock_irqsave(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (status == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) residue = usb_dmac_chan_get_residue(uchan, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) spin_unlock_irqrestore(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static void usb_dmac_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) spin_lock_irqsave(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) usb_dmac_chan_start_desc(uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) spin_unlock_irqrestore(&uchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) usb_dmac_desc_put(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * IRQ handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) struct usb_dmac_desc *desc = chan->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) BUG_ON(!desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) if (++desc->sg_index < desc->sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) usb_dmac_chan_start_sg(chan, desc->sg_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) desc->residue = usb_dmac_get_current_residue(chan, desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) desc->sg_index - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) desc->done_cookie = desc->vd.tx.cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) desc->vd.tx_result.result = DMA_TRANS_NOERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) desc->vd.tx_result.residue = desc->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) vchan_cookie_complete(&desc->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /* Restart the next transfer if this driver has a next desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) usb_dmac_chan_start_desc(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) struct usb_dmac_chan *chan = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) u32 mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) u32 chcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) bool xfer_end = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) spin_lock(&chan->vc.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) if (chcr & USB_DMACHCR_DE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) xfer_end = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) ret |= IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (chcr & USB_DMACHCR_NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /* An interruption of TE will happen after we set FTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) mask |= USB_DMACHCR_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) chcr |= USB_DMACHCR_FTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) ret |= IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (xfer_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) usb_dmac_isr_transfer_end(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) spin_unlock(&chan->vc.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * OF xlate and channel filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) struct of_phandle_args *dma_spec = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) /* USB-DMAC should be used with fixed usb controller's FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (uchan->index != dma_spec->args[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (dma_spec->args_count != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) /* Only slave DMA channels can be allocated via DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) ofdma->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * Power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) static int usb_dmac_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct usb_dmac *dmac = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) for (i = 0; i < dmac->n_channels; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (!dmac->channels[i].iomem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) usb_dmac_chan_halt(&dmac->channels[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) static int usb_dmac_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) struct usb_dmac *dmac = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) return usb_dmac_init(dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) static const struct dev_pm_ops usb_dmac_pm = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * Probe and remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) static int usb_dmac_chan_probe(struct usb_dmac *dmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) struct usb_dmac_chan *uchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct platform_device *pdev = to_platform_device(dmac->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) char pdev_irqname[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) char *irqname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) uchan->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) /* Request the channel interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) sprintf(pdev_irqname, "ch%u", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (uchan->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) dev_name(dmac->dev), index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (!irqname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) IRQF_SHARED, irqname, uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) uchan->irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) uchan->vc.desc_free = usb_dmac_virt_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) vchan_init(&uchan->vc, &dmac->engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) INIT_LIST_HEAD(&uchan->desc_freed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) INIT_LIST_HEAD(&uchan->desc_got);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) dev_err(dev, "unable to read dma-channels property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) dev_err(dev, "invalid number of channels %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) dmac->n_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) static int usb_dmac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct dma_device *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct usb_dmac *dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct resource *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) dmac->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) platform_set_drvdata(pdev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ret = usb_dmac_parse_of(&pdev->dev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) sizeof(*dmac->channels), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!dmac->channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /* Request resources. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (IS_ERR(dmac->iomem))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return PTR_ERR(dmac->iomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /* Enable runtime PM and initialize the device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ret = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) goto error_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = usb_dmac_init(dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dev_err(&pdev->dev, "failed to reset device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /* Initialize the channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) INIT_LIST_HEAD(&dmac->engine.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) for (i = 0; i < dmac->n_channels; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Register the DMAC as a DMA provider for DT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * Register the DMA engine device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * Default transfer size of 32 bytes requires 32-byte alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) engine = &dmac->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) dma_cap_set(DMA_SLAVE, engine->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) engine->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) engine->src_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) engine->dst_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) engine->device_free_chan_resources = usb_dmac_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) engine->device_terminate_all = usb_dmac_chan_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) engine->device_tx_status = usb_dmac_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) engine->device_issue_pending = usb_dmac_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ret = dma_async_device_register(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) error_pm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static void usb_dmac_chan_remove(struct usb_dmac *dmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct usb_dmac_chan *uchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) usb_dmac_chan_halt(uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) devm_free_irq(dmac->dev, uchan->irq, uchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) static int usb_dmac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct usb_dmac *dmac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) for (i = 0; i < dmac->n_channels; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) usb_dmac_chan_remove(dmac, &dmac->channels[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) dma_async_device_unregister(&dmac->engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) static void usb_dmac_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct usb_dmac *dmac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) usb_dmac_stop(dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static const struct of_device_id usb_dmac_of_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) { .compatible = "renesas,usb-dmac", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) { /* Sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static struct platform_driver usb_dmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) .pm = &usb_dmac_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) .name = "usb-dmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) .of_match_table = usb_dmac_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) .probe = usb_dmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) .remove = usb_dmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .shutdown = usb_dmac_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) module_platform_driver(usb_dmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) MODULE_LICENSE("GPL v2");