// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas R-Car Gen2/Gen3 DMA Controller Driver
 *
 * Copyright (C) 2014-2019 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/*
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};

/*
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 * @reserved: padding that keeps each hardware descriptor 16 bytes long
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));

/*
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)

/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))

/*
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};

/*
 * struct rcar_dmac_chan_map - Mapping of a slave device physical address to a DMA address
 * @addr: slave DMA address
 * @dir: direction of the mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};

/*
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @map: slave device address mapping used for slave transfers
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;
	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};


#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)

/*
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @channels_mask: bitfield of which DMA channels are managed by this driver
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;
	u32 channels_mask;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)

/*
 * struct rcar_dmac_of_data - This driver's OF data
 * @chan_offset_base: DMAC channels base offset
 * @chan_offset_stride: DMAC channels offset stride
 */
struct rcar_dmac_of_data {
	u32 chan_offset_base;
	u32 chan_offset_stride;
};

/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
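/*
 * The transfer size (TS) field is split across CHCR: its upper bit lives at
 * bit 20 and its lower two bits at bits 4:3, hence the two-part encodings
 * below.
 */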
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4

/* -----------------------------------------------------------------------------
 * Device access
 */

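/*
 * DMAOR is accessed as a 16-bit register; all other controller-global
 * registers are accessed as 32-bit.
 */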
static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

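/*
 * DMARS is accessed as a 16-bit register; all other per-channel registers are
 * accessed as 32-bit.
 */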
static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}

/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}

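/*
 * Program the transfer at the head of the channel's running descriptor,
 * either as a hardware descriptor list or as a single directly programmed
 * chunk, then set the DE bit to start it.
 */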
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at the beginning of the transfer by the
		 * DMAC like it should be. Initialize it manually with the
		 * destination address of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
			     chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE);
}

static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}

/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptor lists. The descriptor's chunks list will be reinitialized to
 * an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
			       struct rcar_dmac_desc *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
	list_add(&desc->node, &chan->desc.free);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	/*
	 * We have to temporarily move all descriptors from the wait list to a
	 * local list as iterating over the wait list, even with
	 * list_for_each_entry_safe, isn't safe if we release the channel lock
	 * around the rcar_dmac_desc_put() call.
	 */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice_init(&chan->desc.wait, &list);
	spin_unlock_irqrestore(&chan->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, node) {
		if (async_tx_test_ack(&desc->async_tx)) {
			list_del(&desc->node);
			rcar_dmac_desc_put(chan, desc);
		}
	}

	if (list_empty(&list))
		return;

	/* Put the remaining descriptors back in the wait list. */
	spin_lock_irqsave(&chan->lock, flags);
	list_splice(&list, &chan->desc.wait);
	spin_unlock_irqrestore(&chan->lock, flags);
}

/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc;
	unsigned long flags;
	int ret;

	/* Recycle acked descriptors before attempting allocation. */
	rcar_dmac_desc_recycle_acked(chan);

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.free)) {
		/*
		 * No free descriptors, allocate a page worth of them and try
		 * again, as someone else could race us to get the newly
		 * allocated descriptors. If the allocation fails return an
		 * error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
	list_del(&desc->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return desc;
}

/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

		list_add_tail(&chunk->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.chunks_free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_xfer_chunk *chunk;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->lock, flags);

	while (list_empty(&chan->desc.chunks_free)) {
		/*
		 * No free chunk descriptors, allocate a page worth of them and
		 * try again, as someone else could race us to get the newly
		 * allocated chunks. If the allocation fails return an error.
		 */
		spin_unlock_irqrestore(&chan->lock, flags);
		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
		if (ret < 0)
			return NULL;
		spin_lock_irqsave(&chan->lock, flags);
	}

	chunk = list_first_entry(&chan->desc.chunks_free,
				 struct rcar_dmac_xfer_chunk, node);
	list_del(&chunk->node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return chunk;
}

static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
				     struct rcar_dmac_desc *desc, size_t size)
{
	/*
	 * dma_alloc_coherent() allocates memory in page size increments. To
	 * avoid reallocating the hardware descriptors when the allocated size
	 * wouldn't change align the requested size to a multiple of the page
	 * size.
	 */
	size = PAGE_ALIGN(size);

	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
				  desc->hwdescs.mem, desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
					       &desc->hwdescs.dma, GFP_NOWAIT);
	if (!desc->hwdescs.mem)
		return;

	desc->hwdescs.size = size;
}

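/*
 * Fill the hardware descriptor memory with one entry per transfer chunk. The
 * TCR value is expressed in transfer units (size >> xfer_shift), not in bytes.
 */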
static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
				 struct rcar_dmac_desc *desc)
{
	struct rcar_dmac_xfer_chunk *chunk;
	struct rcar_dmac_hw_desc *hwdesc;

	rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

	hwdesc = desc->hwdescs.mem;
	if (!hwdesc)
		return -ENOMEM;

	list_for_each_entry(chunk, &desc->chunks, node) {
		hwdesc->sar = chunk->src_addr;
		hwdesc->dar = chunk->dst_addr;
		hwdesc->tcr = chunk->size >> desc->xfer_shift;
		hwdesc++;
	}

	return 0;
}

/* -----------------------------------------------------------------------------
 * Stop and reset
 */
static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
{
	u32 chcr;
	unsigned int i;

	/*
	 * Ensure that the DE bit actually reads back as 0 after it has been
	 * cleared.
	 */
	for (i = 0; i < 1024; i++) {
		chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
		if (!(chcr & RCAR_DMACHCR_DE))
			return;
		udelay(1);
	}

	dev_err(chan->chan.device->dev, "CHCR DE check error\n");
}

static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	/* set DE=0 and flush remaining data */
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));

	/* make sure all remaining data was flushed */
	rcar_dmac_chcr_de_barrier(chan);
}

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
		  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE |
		  RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE);
	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
	rcar_dmac_chcr_de_barrier(chan);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct rcar_dmac_desc *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) LIST_HEAD(descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
	/* Move all non-free descriptors to the local list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) list_splice_init(&chan->desc.pending, &descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) list_splice_init(&chan->desc.active, &descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) list_splice_init(&chan->desc.done, &descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) list_splice_init(&chan->desc.wait, &descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) chan->desc.running = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) list_for_each_entry_safe(desc, _desc, &descs, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) rcar_dmac_desc_put(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) /* Stop all channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) for (i = 0; i < dmac->n_channels; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct rcar_dmac_chan *chan = &dmac->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!(dmac->channels_mask & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /* Stop and reinitialize the channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) spin_lock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) rcar_dmac_chan_halt(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) spin_unlock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
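/*
 * Pause is implemented by clearing the DE bit; rcar_dmac_clear_chcr_de() also
 * waits for data still held by the DMAC to be flushed out.
 */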
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static int rcar_dmac_chan_pause(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) spin_lock_irqsave(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) rcar_dmac_clear_chcr_de(rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) spin_unlock_irqrestore(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Descriptors preparation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct rcar_dmac_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) {
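	/* CHCR TS field values indexed by ilog2(transfer size in bytes). */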
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static const u32 chcr_ts[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) RCAR_DMACHCR_TS_64B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) unsigned int xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) u32 chcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) switch (desc->direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) | RCAR_DMACHCR_RS_DMARS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) xfer_size = chan->src.xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) | RCAR_DMACHCR_RS_DMARS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) xfer_size = chan->dst.xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) case DMA_MEM_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) | RCAR_DMACHCR_RS_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) desc->xfer_shift = ilog2(xfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) desc->chcr = chcr | chcr_ts[desc->xfer_shift];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA the direction carries its usual meaning:
 * the SG list describes RAM and dev_addr holds the slave address, e.g. the
 * FIFO I/O register. For MEMCPY the direction is DMA_MEM_TO_MEM and the SG
 * list contains a single element pointing at the source buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned int sg_len, dma_addr_t dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) enum dma_transfer_direction dir, unsigned long dma_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) bool cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct rcar_dmac_xfer_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct rcar_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) unsigned int nchunks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) unsigned int max_chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned int full_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) bool cross_boundary = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) u32 high_dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) u32 high_mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) desc = rcar_dmac_desc_get(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) desc->async_tx.flags = dma_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) desc->async_tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) desc->cyclic = cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) desc->direction = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rcar_dmac_chan_configure_desc(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
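	/*
	 * The maximum chunk size is limited by the width of the TCR register,
	 * expressed in transfer-size units rather than bytes.
	 */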
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Allocate and fill the transfer chunk descriptors. We own the only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * reference to the DMA descriptor, there's no need for locking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dma_addr_t mem_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) unsigned int len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) full_size += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
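		/*
		 * Record the upper 32 bits of the first device and memory
		 * addresses; a mismatch on any later chunk means the transfer
		 * spans more than one 4GiB region and hardware descriptors
		 * cannot be used (see below).
		 */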
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) high_dev_addr = dev_addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) high_mem_addr = mem_addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if ((dev_addr >> 32 != high_dev_addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) (mem_addr >> 32 != high_mem_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) cross_boundary = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) unsigned int size = min(len, max_chunk_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * Prevent individual transfers from crossing 4GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) cross_boundary = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) cross_boundary = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) chunk = rcar_dmac_xfer_chunk_get(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (!chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) rcar_dmac_desc_put(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) chunk->src_addr = dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) chunk->dst_addr = mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) chunk->src_addr = mem_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) chunk->dst_addr = dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) chunk->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) dev_dbg(chan->chan.device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) chan->index, chunk, desc, i, sg, size, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) &chunk->src_addr, &chunk->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) mem_addr += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (dir == DMA_MEM_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dev_addr += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) len -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) list_add_tail(&chunk->node, &desc->chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) nchunks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) desc->nchunks = nchunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) desc->size = full_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * Use hardware descriptor lists if possible when more than one chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * needs to be transferred (otherwise they don't make much sense).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) *
	 * Hardware descriptors require the source and destination addresses of
	 * all chunks to be located in the same 4GiB region of the 40-bit
	 * address space; cross_boundary records whether that constraint is
	 * violated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) desc->hwdescs.use = !cross_boundary && nchunks > 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (desc->hwdescs.use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) desc->hwdescs.use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) return &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * DMA engine operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) INIT_LIST_HEAD(&rchan->desc.chunks_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) INIT_LIST_HEAD(&rchan->desc.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /* Preallocate descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
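	/*
	 * Keep the device powered while the channel is in use; the matching
	 * pm_runtime_put() is in rcar_dmac_free_chan_resources().
	 */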
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return pm_runtime_get_sync(chan->device->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct rcar_dmac_chan_map *map = &rchan->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct rcar_dmac_desc_page *page, *_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct rcar_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Protect against ISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) spin_lock_irq(&rchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rcar_dmac_chan_halt(rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) spin_unlock_irq(&rchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * Now no new interrupts will occur, but one might already be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * running. Wait for it to finish before freeing resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) synchronize_irq(rchan->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (rchan->mid_rid >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* The caller is holding dma_list_mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) clear_bit(rchan->mid_rid, dmac->modules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) rchan->mid_rid = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) list_splice_init(&rchan->desc.free, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) list_splice_init(&rchan->desc.pending, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) list_splice_init(&rchan->desc.active, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) list_splice_init(&rchan->desc.done, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) list_splice_init(&rchan->desc.wait, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) rchan->desc.running = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) list_for_each_entry(desc, &list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) rcar_dmac_realloc_hwdesc(rchan, desc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) list_del(&page->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) free_page((unsigned long)page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /* Remove slave mapping if present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (map->slave.xfer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) dma_unmap_resource(chan->device->dev, map->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) map->slave.xfer_size, map->dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) map->slave.xfer_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) pm_runtime_put(chan->device->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
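/*
 * Memcpy is wrapped in a single-entry scatterlist so that the common
 * rcar_dmac_chan_prep_sg() path can be reused.
 */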
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) dma_addr_t dma_src, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct scatterlist sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) sg_init_table(&sgl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) offset_in_page(dma_src));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) sg_dma_address(&sgl) = dma_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) sg_dma_len(&sgl) = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) DMA_MEM_TO_MEM, flags, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) enum dma_transfer_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct rcar_dmac_chan_map *map = &rchan->map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) phys_addr_t dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) size_t dev_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) enum dma_data_direction dev_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dev_addr = rchan->src.slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dev_size = rchan->src.xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) dev_dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) dev_addr = rchan->dst.slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) dev_size = rchan->dst.xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dev_dir = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* Reuse current map if possible. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (dev_addr == map->slave.slave_addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) dev_size == map->slave.xfer_size &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dev_dir == map->dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* Remove old mapping if present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (map->slave.xfer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dma_unmap_resource(chan->device->dev, map->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) map->slave.xfer_size, map->dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) map->slave.xfer_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) /* Create new slave address map. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dev_dir, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (dma_mapping_error(chan->device->dev, map->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dev_err(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) "chan%u: failed to map %zx@%pap", rchan->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dev_size, &dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) rchan->index, dev_size, &dev_addr, &map->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) map->slave.slave_addr = dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) map->slave.xfer_size = dev_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) map->dir = dev_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) unsigned int sg_len, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* Someone calling slave DMA on a generic channel? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) dev_warn(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) "%s: bad parameter: len=%d, id=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) __func__, sg_len, rchan->mid_rid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (rcar_dmac_map_slave_addr(chan, dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) dir, flags, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
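/*
 * Upper bound on the number of periods accepted for a cyclic transfer; it
 * bounds the size of the temporary sg list allocated in
 * rcar_dmac_prep_dma_cyclic().
 */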
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) #define RCAR_DMAC_MAX_SG_LEN 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) enum dma_transfer_direction dir, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct scatterlist *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) unsigned int sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* Someone calling slave DMA on a generic channel? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (rchan->mid_rid < 0 || buf_len < period_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) dev_warn(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) __func__, buf_len, period_len, rchan->mid_rid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (rcar_dmac_map_slave_addr(chan, dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) sg_len = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) dev_err(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) "chan%u: sg length %d exceeds limit %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Allocate the sg list dynamically as it would consume too much stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) sg_init_table(sgl, sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (i = 0; i < sg_len; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dma_addr_t src = buf_addr + (period_len * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) offset_in_page(src));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) sg_dma_address(&sgl[i]) = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) sg_dma_len(&sgl[i]) = period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) dir, flags, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) kfree(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static int rcar_dmac_device_config(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct dma_slave_config *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while it is in use...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) */
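	/*
	 * The dma_slave_buswidth enumerators are defined as the access width
	 * in bytes, so they can be stored directly as the transfer size.
	 */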
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) rchan->src.slave_addr = cfg->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) rchan->dst.slave_addr = cfg->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) rchan->src.xfer_size = cfg->src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) rchan->dst.xfer_size = cfg->dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) spin_lock_irqsave(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) rcar_dmac_chan_halt(rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) spin_unlock_irqrestore(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * FIXME: No new interrupt can occur now, but the IRQ thread might still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * be running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) rcar_dmac_chan_reinit(rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dma_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct rcar_dmac_desc *desc = chan->desc.running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct rcar_dmac_xfer_chunk *running = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) struct rcar_dmac_xfer_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) unsigned int residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) unsigned int dptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) unsigned int chcrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) unsigned int tcrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * If the cookie corresponds to a descriptor that has been completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * there is no residue. The same check has already been performed by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * caller but without holding the channel lock, so the descriptor could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * now be complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) status = dma_cookie_status(&chan->chan, cookie, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (status == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * If the cookie doesn't correspond to the currently running transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * then the descriptor hasn't been processed yet, and the residue is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * equal to the full descriptor size.
	 * A client driver may also call this function before
	 * rcar_dmac_isr_channel_thread() has run. In that case "desc.running"
	 * already points to the next descriptor while the completed one sits
	 * on the done list, so if the cookie matches a descriptor on the done
	 * list the residue is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (cookie != desc->async_tx.cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) list_for_each_entry(desc, &chan->desc.done, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (cookie == desc->async_tx.cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) list_for_each_entry(desc, &chan->desc.pending, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (cookie == desc->async_tx.cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return desc->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) list_for_each_entry(desc, &chan->desc.active, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (cookie == desc->async_tx.cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return desc->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * No descriptor found for the cookie, there's thus no residue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * This shouldn't happen if the calling driver passes a correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * cookie value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) WARN(1, "No descriptor for cookie!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * We need to read two registers.
	 * Make sure the hardware does not move on to the next chunk between
	 * reading the descriptor pointer and reading the transfer counter.
	 * Trying three times should be enough: initial read, retry, and one
	 * more retry for the paranoid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) RCAR_DMACHCRB_DPTR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /* Still the same? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) RCAR_DMACHCRB_DPTR_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
	WARN_ONCE(i >= 3, "residue might not be consistent!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * In descriptor mode the descriptor running pointer is not maintained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * by the interrupt handler, find the running descriptor from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * descriptor pointer field in the CHCRB register. In non-descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * mode just use the running descriptor pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (desc->hwdescs.use) {
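		/*
		 * The DPTR field points to the next descriptor stage to be
		 * loaded, so the chunk currently being transferred is the
		 * previous one, with wrap-around at zero.
		 */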
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (dptr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) dptr = desc->nchunks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) dptr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) WARN_ON(dptr >= desc->nchunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) running = desc->running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* Compute the size of all chunks still to be transferred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) list_for_each_entry_reverse(chunk, &desc->chunks, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (chunk == running || ++dptr == desc->nchunks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) residue += chunk->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* Add the residue for the current chunk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) residue += tcrb << desc->xfer_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) unsigned int residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) bool cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) status = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (status == DMA_COMPLETE || !txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) spin_lock_irqsave(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) residue = rcar_dmac_chan_get_residue(rchan, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) spin_unlock_irqrestore(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /* if there's no residue, the cookie is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (!residue && !cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static void rcar_dmac_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) spin_lock_irqsave(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (list_empty(&rchan->desc.pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) /* Append the pending list to the active list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * If no transfer is running pick the first descriptor from the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * list and start the transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!rchan->desc.running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct rcar_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) desc = list_first_entry(&rchan->desc.active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct rcar_dmac_desc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) rchan->desc.running = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) rcar_dmac_chan_start_xfer(rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) spin_unlock_irqrestore(&rchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
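/*
 * Wait for a possibly still running interrupt handler for this channel to
 * complete, so callers can safely release resources afterwards.
 */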
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void rcar_dmac_device_synchronize(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) synchronize_irq(rchan->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * IRQ handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct rcar_dmac_desc *desc = chan->desc.running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) unsigned int stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (WARN_ON(!desc || !desc->cyclic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * This should never happen, there should always be a running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * cyclic descriptor when a descriptor stage end interrupt is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * triggered. Warn and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /* Program the interrupt pointer to the next stage. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct rcar_dmac_desc *desc = chan->desc.running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) irqreturn_t ret = IRQ_WAKE_THREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (WARN_ON_ONCE(!desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * This should never happen, there should always be a running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * descriptor when a transfer end interrupt is triggered. Warn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * The transfer end interrupt isn't generated for each chunk when using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * descriptor mode. Only update the running chunk pointer in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * non-descriptor mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (!desc->hwdescs.use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * If we haven't completed the last transfer chunk simply move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * to the next one. Only wake the IRQ thread if the transfer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * cyclic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (!list_is_last(&desc->running->node, &desc->chunks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) desc->running = list_next_entry(desc->running, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (!desc->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * We've completed the last transfer chunk. If the transfer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * cyclic, move back to the first one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (desc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) desc->running =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) list_first_entry(&desc->chunks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct rcar_dmac_xfer_chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* The descriptor is complete, move it to the done list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) list_move_tail(&desc->node, &chan->desc.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /* Queue the next descriptor, if any. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (!list_empty(&chan->desc.active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) chan->desc.running = list_first_entry(&chan->desc.active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) struct rcar_dmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) chan->desc.running = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (chan->desc.running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) rcar_dmac_chan_start_xfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct rcar_dmac_chan *chan = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) bool reinit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) u32 chcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) spin_lock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (chcr & RCAR_DMACHCR_CAE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /*
		 * There is no need to call rcar_dmac_chan_halt() because the
		 * channel is already stopped in the error case. Clear the
		 * channel register and check the DE bit to recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) rcar_dmac_chcr_de_barrier(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) reinit = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) goto spin_lock_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (chcr & RCAR_DMACHCR_TE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) mask |= RCAR_DMACHCR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (mask & RCAR_DMACHCR_DE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) rcar_dmac_chcr_de_barrier(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (chcr & RCAR_DMACHCR_DSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ret |= rcar_dmac_isr_desc_stage_end(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (chcr & RCAR_DMACHCR_TE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) ret |= rcar_dmac_isr_transfer_end(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) spin_lock_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) spin_unlock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (reinit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) dev_err(chan->chan.device->dev, "Channel Address Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) rcar_dmac_chan_reinit(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct rcar_dmac_chan *chan = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct rcar_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct dmaengine_desc_callback cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) spin_lock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) /* For cyclic transfers notify the user after every chunk. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (chan->desc.running && chan->desc.running->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) desc = chan->desc.running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) dmaengine_desc_get_callback(&desc->async_tx, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (dmaengine_desc_callback_valid(&cb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) spin_unlock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) spin_lock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * Call the callback function for all descriptors on the done list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * move them to the ack wait list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) while (!list_empty(&chan->desc.done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) dma_cookie_complete(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) dmaengine_desc_get_callback(&desc->async_tx, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (dmaengine_desc_callback_valid(&cb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) spin_unlock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) * We own the only reference to this descriptor, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * can safely dereference it without holding the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) * lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) spin_lock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) list_add_tail(&desc->node, &chan->desc.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) spin_unlock_irq(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /* Recycle all acked descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) rcar_dmac_desc_recycle_acked(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
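
/*
 * Illustrative sketch, not part of this driver: a minimal dmaengine client
 * sequence that exercises the cyclic-callback path handled above. The "rx"
 * channel name, the buffer variables and client_period_done() are assumptions
 * made up for the example only; error handling is omitted for brevity.
 *
 *	chan = dma_request_chan(dev, "rx");
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = client_period_done;	(invoked once per period)
 *	txd->callback_param = client;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */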
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * OF xlate and channel filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct of_phandle_args *dma_spec = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * function knows which device it wants to allocate a channel from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * and would be perfectly capable of selecting the channel it wants.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * Forcing it to call dma_request_channel() and iterate through all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * channels from all controllers is just pointless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (chan->device->device_config != rcar_dmac_device_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return !test_and_set_bit(dma_spec->args[0], dmac->modules);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct rcar_dmac_chan *rchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (dma_spec->args_count != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /* Only slave DMA channels can be allocated via DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ofdma->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) rchan = to_rcar_dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) rchan->mid_rid = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
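
/*
 * Illustrative sketch, assuming the one-cell specifier handled by
 * rcar_dmac_of_xlate() above: a client device tree node references the
 * controller with a single MID/RID cell per channel. The node name and the
 * 0x91/0x92 values are hypothetical, for the example only.
 *
 *	serial@e6e88000 {
 *		...
 *		dmas = <&dmac0 0x91>, <&dmac0 0x92>;
 *		dma-names = "tx", "rx";
 *	};
 */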
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * Power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static int rcar_dmac_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) static int rcar_dmac_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) struct rcar_dmac *dmac = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return rcar_dmac_init(dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) static const struct dev_pm_ops rcar_dmac_pm = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * TODO for system sleep/resume:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * - Wait for the current transfer to complete and stop the device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * - Resume transfers, if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) pm_runtime_force_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * Probe and remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct rcar_dmac_chan *rchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) const struct rcar_dmac_of_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct platform_device *pdev = to_platform_device(dmac->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct dma_chan *chan = &rchan->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) char pdev_irqname[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) char *irqname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) rchan->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) rchan->iomem = dmac->iomem + data->chan_offset_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) data->chan_offset_stride * index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) rchan->mid_rid = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) spin_lock_init(&rchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) INIT_LIST_HEAD(&rchan->desc.free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) INIT_LIST_HEAD(&rchan->desc.pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) INIT_LIST_HEAD(&rchan->desc.active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) INIT_LIST_HEAD(&rchan->desc.done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) INIT_LIST_HEAD(&rchan->desc.wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /* Request the channel interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) sprintf(pdev_irqname, "ch%u", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (rchan->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) dev_name(dmac->dev), index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (!irqname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * Initialize the DMA engine channel and add it to the DMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * channels list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) chan->device = &dmac->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) dma_cookie_init(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) list_add_tail(&chan->device_node, &dmac->engine.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) rcar_dmac_isr_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) rcar_dmac_isr_channel_thread, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) irqname, rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) rchan->irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) #define RCAR_DMAC_MAX_CHANNELS 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct device_node *np = dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) dev_err(dev, "unable to read dma-channels property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) /* The hardware and driver don't support more than 32 bits in CHCLR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (dmac->n_channels <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) dev_err(dev, "invalid number of channels %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) dmac->n_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * If the driver is unable to read the dma-channel-mask property,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * it assumes that all channels can be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) /* Clear any bits in the mask that refer to non-existent channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
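
/*
 * Worked example (values assumed for illustration only): with
 * dma-channels = <16>, the default mask is GENMASK(15, 0) = 0xffff. If the
 * device tree also provides dma-channel-mask = <0x7fff>, bit 15 is clear and
 * channel 15 is left unused by this driver; any bits above bit 15 would be
 * masked off by the GENMASK() intersection above.
 */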
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) static int rcar_dmac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct dma_device *engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct rcar_dmac *dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) const struct rcar_dmac_of_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) data = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (!dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) dmac->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) platform_set_drvdata(pdev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) ret = rcar_dmac_parse_of(&pdev->dev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 from being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) * is connected to microTLB 0 on currently supported platforms, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * can't use it with the IPMMU. As the IOMMU API operates at the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * level we can't disable it selectively, so ignore channel 0 for now if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * the device is part of an IOMMU group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (device_iommu_mapped(&pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) dmac->channels_mask &= ~BIT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) sizeof(*dmac->channels), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (!dmac->channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) /* Request resources. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (IS_ERR(dmac->iomem))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) return PTR_ERR(dmac->iomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) /* Enable runtime PM and initialize the device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ret = pm_runtime_resume_and_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) ret = rcar_dmac_init(dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) dev_err(&pdev->dev, "failed to reset device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /* Initialize engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) engine = &dmac->engine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) dma_cap_set(DMA_MEMCPY, engine->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) dma_cap_set(DMA_SLAVE, engine->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) engine->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) engine->src_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) engine->dst_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) engine->device_config = rcar_dmac_device_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) engine->device_pause = rcar_dmac_chan_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) engine->device_terminate_all = rcar_dmac_chan_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) engine->device_tx_status = rcar_dmac_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) engine->device_issue_pending = rcar_dmac_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) engine->device_synchronize = rcar_dmac_device_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) INIT_LIST_HEAD(&engine->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) for (i = 0; i < dmac->n_channels; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (!(dmac->channels_mask & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) /* Register the DMAC as a DMA provider for DT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * Register the DMA engine device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * Default transfer size of 32 bytes requires 32-byte alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ret = dma_async_device_register(engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) static int rcar_dmac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct rcar_dmac *dmac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) dma_async_device_unregister(&dmac->engine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static void rcar_dmac_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct rcar_dmac *dmac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) rcar_dmac_stop_all_chan(dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) static const struct rcar_dmac_of_data rcar_dmac_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) .chan_offset_base = 0x8000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) .chan_offset_stride = 0x80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) static const struct of_device_id rcar_dmac_of_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) .compatible = "renesas,rcar-dmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) .data = &rcar_dmac_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) { /* Sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static struct platform_driver rcar_dmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) .pm = &rcar_dmac_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) .name = "rcar-dmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) .of_match_table = rcar_dmac_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) .probe = rcar_dmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) .remove = rcar_dmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) .shutdown = rcar_dmac_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) module_platform_driver(rcar_dmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) MODULE_DESCRIPTION("R-Car Gen2/Gen3 DMA Controller Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) MODULE_LICENSE("GPL v2");