^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Driver for the Analog Devices AXI-DMAC core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright 2013-2019 Analog Devices Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Lars-Peter Clausen <lars@metafoo.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/bitfield.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/regmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/fpga/adi-axi-common.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <dt-bindings/dma/axi-dmac.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include "virt-dma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * Each channel of the core has a source interface and a destination interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * interconnect, which allows access to system memory, or it can be connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * a dedicated bus which is directly connected to a data port on a peripheral.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * Given that those are configuration options of the core that are selected when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * it is instantiated this means that they can not be changed by software at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * runtime. By extension this means that each channel is uni-directional. It can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * either be device to memory or memory to device, but not both. Also since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * device side is a dedicated data bus only connected to a single peripheral
 * there is no address that can or needs to be configured for the device side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
/*
 * Interface description register. Its fields encode how the channel's
 * interfaces were instantiated (decoded at probe time, outside this chunk).
 */
#define AXI_DMAC_REG_INTERFACE_DESC 0x10
/* Bus type of the source port (compared against AXI_DMAC_BUS_TYPE_*) */
#define AXI_DMAC_DMA_SRC_TYPE_MSK GENMASK(13, 12)
#define AXI_DMAC_DMA_SRC_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
/* Encoded data width of the source port */
#define AXI_DMAC_DMA_SRC_WIDTH_MSK GENMASK(11, 8)
#define AXI_DMAC_DMA_SRC_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
/* Bus type of the destination port */
#define AXI_DMAC_DMA_DST_TYPE_MSK GENMASK(5, 4)
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
/* Encoded data width of the destination port */
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

/* Interrupt registers; pending bits are acknowledged by writing them back */
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
#define AXI_DMAC_REG_IRQ_SOURCE 0x88

/* Channel control/status registers */
#define AXI_DMAC_REG_CTRL 0x400
/* Read-only: ID the hardware will assign to the next queued transfer */
#define AXI_DMAC_REG_TRANSFER_ID 0x404
/* Write 1 to queue a transfer; reads non-zero while the queue is full */
#define AXI_DMAC_REG_START_TRANSFER 0x408
#define AXI_DMAC_REG_FLAGS 0x40c
#define AXI_DMAC_REG_DEST_ADDRESS 0x410
#define AXI_DMAC_REG_SRC_ADDRESS 0x414
/* X/Y lengths are programmed as length - 1 */
#define AXI_DMAC_REG_X_LENGTH 0x418
#define AXI_DMAC_REG_Y_LENGTH 0x41c
#define AXI_DMAC_REG_DEST_STRIDE 0x420
#define AXI_DMAC_REG_SRC_STRIDE 0x424
/* Bitmask of completed transfer IDs; bit 31 flags pending partial reports */
#define AXI_DMAC_REG_TRANSFER_DONE 0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS 0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438
/* Length and ID of the next unreported partial (short) transfer */
#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450

/* AXI_DMAC_REG_CTRL bits */
#define AXI_DMAC_CTRL_ENABLE BIT(0)
#define AXI_DMAC_CTRL_PAUSE BIT(1)

/* Interrupt bits: start-of-transfer and end-of-transfer */
#define AXI_DMAC_IRQ_SOT BIT(0)
#define AXI_DMAC_IRQ_EOT BIT(1)

/* AXI_DMAC_REG_FLAGS bits, programmed per queued transfer */
#define AXI_DMAC_FLAG_CYCLIC BIT(0)
#define AXI_DMAC_FLAG_LAST BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2)

/* In AXI_DMAC_REG_TRANSFER_DONE: partial transfer information is available */
#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
/*
 * One hardware transfer segment. @id tracks the transfer ID the hardware
 * assigned when the segment was queued, or AXI_DMAC_SG_UNUSED while the
 * segment is not in flight.
 */
struct axi_dmac_sg {
	dma_addr_t src_addr;		/* used when the source is memory-mapped */
	dma_addr_t dest_addr;		/* used when the destination is memory-mapped */
	unsigned int x_len;		/* bytes per row */
	unsigned int y_len;		/* number of rows (2D transfers) */
	unsigned int dest_stride;	/* programmed into AXI_DMAC_REG_DEST_STRIDE */
	unsigned int src_stride;	/* programmed into AXI_DMAC_REG_SRC_STRIDE */
	unsigned int id;		/* hw transfer ID, or AXI_DMAC_SG_UNUSED */
	unsigned int partial_len;	/* bytes reported for a short (partial) transfer */
	bool schedule_when_free;	/* resubmit this segment once its slot frees up */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
/*
 * Software descriptor: a virt-dma descriptor carrying one or more hardware
 * segments that are submitted to the controller one at a time.
 */
struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;			/* wrap around instead of completing */
	bool have_partial_xfer;		/* a segment finished short (partial) */

	unsigned int num_submitted;	/* index of the next segment to submit */
	unsigned int num_completed;	/* index of the next segment to retire */
	unsigned int num_sgs;		/* number of entries in sg[] */
	struct axi_dmac_sg sg[];	/* segments, submitted in order */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
/*
 * Driver state for a single DMA channel. The direction and the interface
 * types/widths are fixed when the core is instantiated and cannot change
 * at runtime (see the comment at the top of the file).
 */
struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;	/* descriptor with segments left to submit */
	struct list_head active_descs;		/* descriptors handed to the hardware */
	enum dma_transfer_direction direction;

	/* Interface parameters, read from the core at probe (outside this view) */
	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;		/* AXI_DMAC_BUS_TYPE_* */
	unsigned int dest_type;		/* AXI_DMAC_BUS_TYPE_* */

	unsigned int max_length;		/* largest supported segment length (set at probe) */
	unsigned int address_align_mask;	/* low address bits that must be zero */
	unsigned int length_align_mask;		/* low length bits that must be zero */

	/* Optional hardware features of this core instance */
	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
/* Top-level driver state: register space, IRQ, clock and the one channel */
struct axi_dmac {
	void __iomem *base;	/* mapped register space */
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;	/* the core exposes a single channel */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) return container_of(chan->vchan.chan.device, struct axi_dmac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) return container_of(c, struct axi_dmac_chan, vchan.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return container_of(vdesc, struct axi_dmac_desc, vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
/* Write a 32-bit value to the controller register at offset @reg. */
static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
/* Read the 32-bit controller register at offset @reg. */
static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if ((len & chan->length_align_mask) != 0) /* Not aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) if ((addr & chan->address_align_mask) != 0) /* Not aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
/*
 * Queue the next scatter-gather segment on the hardware, if the
 * controller's transfer queue has room. Called with the channel's
 * vchan.lock held (from the IRQ handler and from issue_pending).
 */
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	/* Continue a partially submitted descriptor, or fetch a new one */
	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		/* Last segment of the descriptor (or cut short by a partial) */
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	/* The hardware tells us which ID this transfer will complete under */
	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	/* Addresses/strides are only programmed for memory-mapped ports */
	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
	    desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	/* Lengths are programmed as length - 1; writing START queues it */
	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
/*
 * Return the oldest in-flight descriptor, or NULL if none.
 * Called with the channel's vchan.lock held.
 */
static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct axi_dmac_sg *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) if (chan->hw_2d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) return sg->x_len * sg->y_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) return sg->x_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
/*
 * Drain the hardware's partial-transfer report queue: read the length and
 * transfer ID of each report and attach the length to the matching
 * in-flight segment. Called with the vchan.lock held, from the EOT path
 * (see axi_dmac_transfer_done).
 */
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		/* Next pending report; the DONE flag below says if more remain */
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		/* Find the in-flight segment that was queued under this ID */
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				"Not found partial segment id=%u, len=%u\n",
				id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
/*
 * Fill in the dmaengine tx_result for a descriptor whose most recently
 * retired segment was partial (short). The caller has already incremented
 * num_completed, so the partial segment is at index num_completed - 1; the
 * residue is every byte not transferred from that segment to the end of
 * the descriptor.
 */
static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
/*
 * Retire every segment whose completion bit is set in @completed_transfers
 * (the TRANSFER_DONE register value), firing cyclic callbacks and
 * completing descriptors along the way. Returns true when a deferred
 * segment is now free to be submitted, so the caller should start the
 * next transfer. Called with the channel's vchan.lock held.
 */
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	/* Pick up any partial-transfer reports before retiring segments */
	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		/* A segment queued while this slot was busy can go out now */
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		/* A short segment ends the descriptor; record the residue */
		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
/*
 * Interrupt handler. Acknowledges all pending IRQs, retires completed
 * transfers on end-of-transfer (EOT) and submits the next segment when
 * queue space opened up (SOT) or a deferred segment became schedulable.
 */
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	/* Acknowledge the interrupts we are about to handle */
	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
/*
 * dmaengine device_terminate_all callback: disable the controller and
 * release every pending and in-flight descriptor.
 */
static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	/* Clearing ENABLE (and PAUSE) stops the controller */
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	/* Collect both not-yet-issued and hardware-active descriptors */
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	/* Free them outside the lock */
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static void axi_dmac_synchronize(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) vchan_synchronize(&chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static void axi_dmac_issue_pending(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct axi_dmac *dmac = chan_to_axi_dmac(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) if (vchan_issue_pending(&chan->vchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) axi_dmac_start_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct axi_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) for (i = 0; i < num_sgs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) desc->sg[i].id = AXI_DMAC_SG_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) desc->num_sgs = num_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) enum dma_transfer_direction direction, dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) unsigned int num_periods, unsigned int period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) struct axi_dmac_sg *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) unsigned int num_segments, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) unsigned int segment_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) /* Split into multiple equally sized segments if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) num_segments = DIV_ROUND_UP(period_len, chan->max_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) segment_size = DIV_ROUND_UP(period_len, num_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) /* Take care of alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) for (i = 0; i < num_periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) len = period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) while (len > segment_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) sg->dest_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) sg->src_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) sg->x_len = segment_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) sg->y_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) addr += segment_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) len -= segment_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) sg->dest_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) sg->src_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) sg->x_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) sg->y_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) addr += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) struct dma_chan *c, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) unsigned int sg_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) struct axi_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct axi_dmac_sg *dsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) unsigned int num_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (direction != chan->direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) num_sgs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) for_each_sg(sgl, sg, sg_len, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) desc = axi_dmac_alloc_desc(num_sgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) dsg = desc->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) !axi_dmac_check_len(chan, sg_dma_len(sg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) sg_dma_len(sg), dsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) desc->cyclic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) size_t period_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) struct axi_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) unsigned int num_periods, num_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) if (direction != chan->direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (!axi_dmac_check_len(chan, buf_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) !axi_dmac_check_addr(chan, buf_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (period_len == 0 || buf_len % period_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) num_periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) num_segments = DIV_ROUND_UP(period_len, chan->max_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) desc = axi_dmac_alloc_desc(num_periods * num_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) period_len, desc->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) desc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct dma_chan *c, struct dma_interleaved_template *xt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) struct axi_dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) size_t dst_icg, src_icg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (xt->frame_size != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (xt->dir != chan->direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (axi_dmac_src_is_mem(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) if (axi_dmac_dest_is_mem(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (chan->hw_2d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) xt->numf == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (xt->sgl[0].size + dst_icg > chan->max_length ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) xt->sgl[0].size + src_icg > chan->max_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (dst_icg != 0 || src_icg != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) if (chan->max_length / xt->sgl[0].size < xt->numf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) desc = axi_dmac_alloc_desc(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (axi_dmac_src_is_mem(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) desc->sg[0].src_addr = xt->src_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (axi_dmac_dest_is_mem(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) desc->sg[0].dest_addr = xt->dst_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (chan->hw_2d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) desc->sg[0].x_len = xt->sgl[0].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) desc->sg[0].y_len = xt->numf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) desc->sg[0].y_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (flags & DMA_CYCLIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) desc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) static void axi_dmac_free_chan_resources(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) vchan_free_chan_resources(to_virt_chan(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) case AXI_DMAC_REG_IRQ_MASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) case AXI_DMAC_REG_IRQ_SOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) case AXI_DMAC_REG_IRQ_PENDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) case AXI_DMAC_REG_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) case AXI_DMAC_REG_TRANSFER_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) case AXI_DMAC_REG_START_TRANSFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) case AXI_DMAC_REG_FLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) case AXI_DMAC_REG_DEST_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) case AXI_DMAC_REG_SRC_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) case AXI_DMAC_REG_X_LENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) case AXI_DMAC_REG_Y_LENGTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) case AXI_DMAC_REG_DEST_STRIDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) case AXI_DMAC_REG_SRC_STRIDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) case AXI_DMAC_REG_TRANSFER_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) case AXI_DMAC_REG_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) case AXI_DMAC_REG_CURRENT_SRC_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) case AXI_DMAC_REG_CURRENT_DEST_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) case AXI_DMAC_REG_PARTIAL_XFER_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) case AXI_DMAC_REG_PARTIAL_XFER_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) static const struct regmap_config axi_dmac_regmap_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) .reg_bits = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) .val_bits = 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) .reg_stride = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) .readable_reg = axi_dmac_regmap_rdwr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) .writeable_reg = axi_dmac_regmap_rdwr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) chan->direction = DMA_MEM_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) chan->direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) chan->direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) chan->direction = DMA_DEV_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) * The configuration stored in the devicetree matches the configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * parameters of the peripheral instance and allows the driver to know which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * features are implemented and how it should behave.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct axi_dmac_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ret = of_property_read_u32(of_chan, "reg", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* We only support 1 channel for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (val != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (val > AXI_DMAC_BUS_TYPE_FIFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) chan->src_type = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (val > AXI_DMAC_BUS_TYPE_FIFO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) chan->dest_type = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) chan->src_width = val / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) chan->dest_width = val / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) axi_dmac_adjust_chan_params(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct device_node *of_channels, *of_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (of_channels == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) for_each_child_of_node(of_channels, of_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) of_node_put(of_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) of_node_put(of_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) of_node_put(of_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) struct axi_dmac_chan *chan = &dmac->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned int val, desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (desc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) dev_err(dev, "DMA interface register reads zero\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (val > AXI_DMAC_BUS_TYPE_FIFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) dev_err(dev, "Invalid source bus type read: %d\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) chan->src_type = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (val > AXI_DMAC_BUS_TYPE_FIFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) dev_err(dev, "Invalid destination bus type read: %d\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) chan->dest_type = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (val == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) dev_err(dev, "Source bus width is zero\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /* widths are stored in log2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) chan->src_width = 1 << val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (val == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dev_err(dev, "Destination bus width is zero\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) chan->dest_width = 1 << val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) axi_dmac_adjust_chan_params(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct axi_dmac_chan *chan = &dmac->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) chan->hw_cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) chan->hw_2d = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (chan->max_length != UINT_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) chan->max_length++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) dev_err(dmac->dma_dev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) "Destination memory-mapped interface not supported.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) dev_err(dmac->dma_dev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) "Source memory-mapped interface not supported.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) chan->hw_partial_xfer = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) chan->length_align_mask =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) chan->length_align_mask = chan->address_align_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static int axi_dmac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct dma_device *dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct axi_dmac *dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct regmap *regmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) unsigned int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dmac->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (dmac->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return dmac->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (dmac->irq == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) dmac->base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (IS_ERR(dmac->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return PTR_ERR(dmac->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) dmac->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (IS_ERR(dmac->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return PTR_ERR(dmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ret = clk_prepare_enable(dmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ret = axi_dmac_parse_dt(&pdev->dev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) goto err_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) INIT_LIST_HEAD(&dmac->chan.active_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dma_set_max_seg_size(&pdev->dev, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) dma_dev = &dmac->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dma_dev->device_tx_status = dma_cookie_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) dma_dev->device_issue_pending = axi_dmac_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dma_dev->device_terminate_all = axi_dmac_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) dma_dev->device_synchronize = axi_dmac_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) dma_dev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dma_dev->chancnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) dma_dev->directions = BIT(dmac->chan.direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) INIT_LIST_HEAD(&dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) dmac->chan.vchan.desc_free = axi_dmac_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) vchan_init(&dmac->chan.vchan, dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ret = axi_dmac_detect_caps(dmac, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) goto err_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ret = dma_async_device_register(dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) goto err_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ret = of_dma_controller_register(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) of_dma_xlate_by_chan_id, dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) goto err_unregister_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dev_name(&pdev->dev), dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) goto err_unregister_of;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) platform_set_drvdata(pdev, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) &axi_dmac_regmap_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (IS_ERR(regmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) ret = PTR_ERR(regmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) free_irq(dmac->irq, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) err_unregister_of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) err_unregister_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) dma_async_device_unregister(&dmac->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) err_clk_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) clk_disable_unprepare(dmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static int axi_dmac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct axi_dmac *dmac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) free_irq(dmac->irq, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) tasklet_kill(&dmac->chan.vchan.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) dma_async_device_unregister(&dmac->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) clk_disable_unprepare(dmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static const struct of_device_id axi_dmac_of_match_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) { .compatible = "adi,axi-dmac-1.00.a" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) static struct platform_driver axi_dmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .name = "dma-axi-dmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .of_match_table = axi_dmac_of_match_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) .probe = axi_dmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) .remove = axi_dmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) module_platform_driver(axi_dmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) MODULE_LICENSE("GPL v2");