// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

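/*
 * Hardware link-list item (LLI). The fields mirror the per-channel
 * CX_LLI/CX_CNT0/CX_SRC/CX_DST/CX_CFG registers programmed by
 * k3_dma_set_desc(). Entries are chained through the lli field; the
 * 32-byte alignment keeps the low address bits clear for the
 * CX_LLI_CHAIN_EN flag OR'ed in by k3_dma_fill_desc().
 */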
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
	struct dma_slave_config	slave_config;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	u32			dma_channel_mask;
	unsigned int		irq;
};

#define K3_FLAG_NOCLK	BIT(1)

struct k3dma_soc_data {
	unsigned long flags;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg);

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

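/* Start (on == true) or stop the physical channel by toggling CX_CFG_EN. */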
static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

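/*
 * Interrupt handler. TC1 signals completion of a whole link list
 * (memcpy/slave_sg), TC2 signals the per-period interrupt of a cyclic
 * transfer. Handled channel and error bits are acknowledged in the *_RAW
 * registers, and the tasklet is scheduled so freed physical channels can be
 * handed to the next pending request.
 */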
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1 = readl_relaxed(d->base + INT_TC1);
	u32 tc2 = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
			unsigned long flags;

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				if (p->ds_run != NULL) {
					vchan_cookie_complete(&p->ds_run->vd);
					p->ds_done = p->ds_run;
					p->ds_run = NULL;
				}
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

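/*
 * Take the next issued descriptor off the virtual channel and program its
 * first LLI into the attached physical channel. Returns -EAGAIN when no
 * physical channel is attached, the channel is still busy, or nothing has
 * been issued yet.
 */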
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	/* Avoid losing track of ds_run if a transaction is in flight */
	if (c->phy->ds_run)
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

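/*
 * Deferred scheduler. First pass: for every virtual channel whose descriptor
 * has completed, try to start the next one, otherwise release the physical
 * channel. Second pass: hand free physical channels (honouring
 * dma_channel_mask) to virtual channels waiting on d->chan_pending and start
 * their first descriptor.
 */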
static void k3_dma_tasklet(struct tasklet_struct *t)
{
	struct k3_dma_dev *d = from_tasklet(d, t, task);
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
					     struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (!(d->dma_channel_mask & (1 << pch)))
			continue;

		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
			 sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

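/*
 * Fill LLI entry @num of @ds. Every entry except the last is chained to its
 * successor (physical address of the next entry, OR'ed with CX_LLI_CHAIN_EN);
 * the prep callbacks later clear or redirect the final lli themselves to
 * terminate the list or, for cyclic transfers, loop back to the first entry.
 */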
static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
							  struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

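/*
 * Build a circular link list for a cyclic transfer: the buffer is split into
 * chunks of at most DMA_CYCLIC_MAX_PERIOD bytes (or period_len if smaller),
 * the entry that completes each period requests a TC2 interrupt via
 * CX_CFG_NODEIRQ, and the last entry's lli points back at the first so the
 * hardware keeps looping until the channel is terminated.
 */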
static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
		__func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
		buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;
	k3_dma_config_write(chan, dir, &c->slave_config);

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);

	memcpy(&c->slave_config, cfg, sizeof(*cfg));

	return 0;
}

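/*
 * Translate the cached dma_slave_config into the CX_CFG value for the given
 * direction: the bus width is encoded into bits 12-15 and 16-19, the burst
 * length (capped at 16) into bits 20-23 and 24-27, and the peripheral
 * request line, taken from the channel id, into bits 4 and up.
 */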
static int k3_dma_config_write(struct dma_chan *chan,
			       enum dma_transfer_direction dir,
			       struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

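/*
 * Terminate: take the channel off the pending list so the tasklet cannot
 * schedule it, stop and acknowledge the physical channel if one is attached,
 * and free every outstanding descriptor.
 */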
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) static int k3_dma_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) struct k3_dma_chan *c = to_k3_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) struct k3_dma_dev *d = to_k3_dma(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) struct k3_dma_phy *p = c->phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /* Prevent this channel being scheduled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) spin_lock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) list_del_init(&c->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) spin_unlock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* Clear the tx descriptor lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) spin_lock_irqsave(&c->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) vchan_get_all_descriptors(&c->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /* vchan is assigned to a pchan - stop the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) k3_dma_terminate_chan(p, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) c->phy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) p->vchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (p->ds_run) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) vchan_terminate_vdesc(&p->ds_run->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) p->ds_run = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) p->ds_done = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) spin_unlock_irqrestore(&c->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) vchan_dma_desc_free_list(&c->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static void k3_dma_synchronize(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct k3_dma_chan *c = to_k3_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) vchan_synchronize(&c->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static int k3_dma_transfer_pause(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct k3_dma_chan *c = to_k3_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct k3_dma_dev *d = to_k3_dma(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct k3_dma_phy *p = c->phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (c->status == DMA_IN_PROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) c->status = DMA_PAUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) k3_dma_pause_dma(p, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) spin_lock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) list_del_init(&c->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) spin_unlock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static int k3_dma_transfer_resume(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct k3_dma_chan *c = to_k3_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct k3_dma_dev *d = to_k3_dma(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct k3_dma_phy *p = c->phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) spin_lock_irqsave(&c->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (c->status == DMA_PAUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) c->status = DMA_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) k3_dma_pause_dma(p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) } else if (!list_empty(&c->vc.desc_issued)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) spin_lock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) list_add_tail(&c->node, &d->chan_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) spin_unlock(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) spin_unlock_irqrestore(&c->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static const struct k3dma_soc_data k3_v1_dma_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) .flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static const struct k3dma_soc_data asp_v1_dma_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) .flags = K3_FLAG_NOCLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static const struct of_device_id k3_pdma_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) { .compatible = "hisilicon,k3-dma-1.0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) .data = &k3_v1_dma_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) .data = &asp_v1_dma_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct k3_dma_dev *d = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) unsigned int request = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (request >= d->dma_requests)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return dma_get_slave_channel(&(d->chans[request].vc.chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
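/*
 * Probe: map the controller, read the channel topology from DT, create the
 * LLI pool and the physical/virtual channels, then register with the
 * dmaengine core and the OF DMA helper.
 */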
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static int k3_dma_probe(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) const struct k3dma_soc_data *soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct k3_dma_dev *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) int i, ret, irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) soc_data = device_get_match_data(&op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (!soc_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) d->base = devm_platform_ioremap_resource(op, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (IS_ERR(d->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return PTR_ERR(d->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
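	/* Channel/request counts and the optional channel mask come from DT */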
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (of_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 		of_property_read_u32(op->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 				"dma-channels", &d->dma_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		of_property_read_u32(op->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 				"dma-requests", &d->dma_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 		ret = of_property_read_u32(op->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 				"dma-channel-mask", &d->dma_channel_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dev_warn(&op->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) "dma-channel-mask doesn't exist, considering all as available.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) d->dma_channel_mask = (u32)~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!(soc_data->flags & K3_FLAG_NOCLK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) d->clk = devm_clk_get(&op->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (IS_ERR(d->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) dev_err(&op->dev, "no dma clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return PTR_ERR(d->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
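	/* A single interrupt line services the whole controller */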
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 	irq = platform_get_irq(op, 0);
	if (irq < 0)
		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) ret = devm_request_irq(&op->dev, irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) k3_dma_int_handler, 0, DRIVER_NAME, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) d->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* A DMA memory pool for LLIs, align on 32-byte boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) LLI_BLOCK_SIZE, 32, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (!d->pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* init phy channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) d->phy = devm_kcalloc(&op->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	if (!d->phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) for (i = 0; i < d->dma_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct k3_dma_phy *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (!(d->dma_channel_mask & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) p = &d->phy[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) p->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) p->base = d->base + i * 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
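	/* Advertise slave, memcpy and cyclic support and wire up the channel ops */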
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) INIT_LIST_HEAD(&d->slave.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) d->slave.dev = &op->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) d->slave.device_tx_status = k3_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) d->slave.device_issue_pending = k3_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) d->slave.device_config = k3_dma_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) d->slave.device_pause = k3_dma_transfer_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) d->slave.device_resume = k3_dma_transfer_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) d->slave.device_terminate_all = k3_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) d->slave.device_synchronize = k3_dma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* init virtual channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) d->chans = devm_kcalloc(&op->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 	if (!d->chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) for (i = 0; i < d->dma_requests; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct k3_dma_chan *c = &d->chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) c->status = DMA_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) INIT_LIST_HEAD(&c->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) c->vc.desc_free = k3_dma_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) vchan_init(&c->vc, &d->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* Enable clock before accessing registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ret = clk_prepare_enable(d->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) k3_dma_enable_dma(d, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) ret = dma_async_device_register(&d->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) goto dma_async_register_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) 	ret = of_dma_controller_register(op->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) k3_of_dma_simple_xlate, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto of_dma_register_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) spin_lock_init(&d->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) INIT_LIST_HEAD(&d->chan_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) tasklet_setup(&d->task, k3_dma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) platform_set_drvdata(op, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) dev_info(&op->dev, "initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) of_dma_register_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) dma_async_device_unregister(&d->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) dma_async_register_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) clk_disable_unprepare(d->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
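/*
 * Unwind probe: unregister from dmaengine and OF, release the interrupt,
 * drop the channels, stop the scheduling tasklet and gate the clock.
 */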
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) static int k3_dma_remove(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct k3_dma_chan *c, *cn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct k3_dma_dev *d = platform_get_drvdata(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) dma_async_device_unregister(&d->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) 	of_dma_controller_free(op->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) devm_free_irq(&op->dev, d->irq, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) list_del(&c->vc.chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) tasklet_kill(&c->vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) tasklet_kill(&d->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) clk_disable_unprepare(d->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) #ifdef CONFIG_PM_SLEEP
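/* Refuse to suspend while any physical channel is still busy. */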
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int k3_dma_suspend_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct k3_dma_dev *d = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) u32 stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) stat = k3_dma_get_chan_stat(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		dev_warn(d->slave.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			"chan %d is still running, unable to suspend\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) k3_dma_enable_dma(d, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) clk_disable_unprepare(d->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
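/* Re-enable the clock and the controller on system resume. */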
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static int k3_dma_resume_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct k3_dma_dev *d = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ret = clk_prepare_enable(d->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) k3_dma_enable_dma(d, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
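/* SIMPLE_DEV_PM_OPS degrades to empty ops when CONFIG_PM_SLEEP is not set. */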
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static struct platform_driver k3_pdma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) .name = DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) .pm = &k3_dma_pmops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .of_match_table = k3_pdma_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .probe = k3_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .remove = k3_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) module_platform_driver(k3_pdma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) MODULE_ALIAS("platform:k3dma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) MODULE_LICENSE("GPL v2");