// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

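/*
 * Static transfer parameters for PDMA peers: element size, element count and
 * burst count, written to the peer's static TR registers (RPSTR0/RPSTR1).
 */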
struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

struct udma_chan;

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
};

struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};

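/* Capability flags used in udma_match_data::flags */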
#define UDMA_FLAG_PDMA_ACC32	BIT(0)
#define UDMA_FLAG_PDMA_BURST	BIT(1)

struct udma_match_data {
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
};

struct udma_soc_data {
	u32 rchan_oes_offset;
};

struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	void *tr_req_base;
	struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];

	size_t buffer_size;
	void *buffer_vaddr;
	dma_addr_t buffer_paddr;
};

struct udma_dev {
	struct dma_device ddev;
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	u8 tpl_levels;
	u32 tpl_start_idx[3];

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;
	spinlock_t lock;

	struct udma_rx_flush rx_flush;

	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;

	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 atype;
};

struct udma_desc {
	struct virt_dma_desc vd;

	bool terminated;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;
	u32 residue;

	unsigned int sglen;
	unsigned int desc_idx; /* Only used for cyclic in packet mode */
	unsigned int tr_idx;

	u32 metadata_size;
	void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;
	u32 residue;
};

struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;
	u32 atype;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct udma_dev *ud;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;
	bool paused;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	u32 bcnt; /* number of bytes completed since the start of the channel */

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};

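/* container_of() helpers to get back to the driver's own structures */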
static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->tchan)
		return 0;
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->tchan)
		return;
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->tchan)
		return;
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->rchan)
		return 0;
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->rchan)
		return;
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->rchan)
		return;
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}

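/* PSI-L thread pairing/unpairing through the TISCI resource manager */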
static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}

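/* Clear the software channel configuration and return the channel to idle */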
static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc, offset));
		}
	}
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}

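/*
 * Match a CPPI5 descriptor address popped from the ring against the
 * channel's terminated descriptor first, then the active one.
 */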
static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}

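/* Free the hardware (CPPI5) descriptors backing a udma_desc */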
static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		struct udma_dev *ud = uc->ud;

		dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}

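/*
 * Deferred descriptor cleanup: descriptors queued on desc_to_purge are freed
 * here, outside of the atomic context in which udma_desc_free() may run.
 */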
static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}

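/*
 * virt-dma desc_free callback: pool backed descriptors are freed directly,
 * all others are queued for the purge work.
 */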
static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}

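/* A channel is running if either its TX or RX side has the RT enable bit set */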
static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}

static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}

static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}

static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
	if (uc->config.dir != DMA_DEV_TO_MEM)
		return false;

	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}

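/*
 * Reset the channel's rings and free a descriptor left in terminated state
 * so that no memory is leaked.
 */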
static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (uc->rchan) {
			ring1 = uc->rflow->fd_ring;
			ring2 = uc->rflow->r_ring;
		}
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		if (uc->tchan) {
			ring1 = uc->tchan->t_ring;
			ring2 = uc->tchan->tc_ring;
		}
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory by stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}

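/* Reset the RT byte/packet counters by writing back their current values */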
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}

	uc->bcnt = 0;
}

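/*
 * Disable the channel; on a hard reset also free and re-allocate its
 * resources to bring the hardware back to a known state.
 */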
static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps recovering
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/* Push all descriptors to ring for packet mode cyclic or RX */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}

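/*
 * Take the next queued descriptor, push its hardware descriptor(s) to the
 * ring and enable the channel (and its PDMA peer) if it is not running yet.
 */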
static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static int udma_stop(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) enum udma_chan_state old_state = uc->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) uc->state = UDMA_CHAN_IS_TERMINATING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) reinit_completion(&uc->teardown_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) switch (uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (!uc->cyclic && !uc->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) udma_push_to_ring(uc, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) UDMA_PEER_RT_EN_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) UDMA_PEER_RT_EN_TEARDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) UDMA_PEER_RT_EN_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) UDMA_PEER_RT_EN_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) UDMA_CHAN_RT_CTL_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) UDMA_CHAN_RT_CTL_TDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) case DMA_MEM_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) UDMA_CHAN_RT_CTL_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) UDMA_CHAN_RT_CTL_TDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) uc->state = old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) complete_all(&uc->teardown_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
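/*
 * A period of a cyclic transfer has completed: restore the hardware
 * descriptor to its original state, push it back to the ring so the
 * transfer keeps running, and advance to the next period.
 */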
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct udma_desc *d = uc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct cppi5_host_desc_t *h_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) cppi5_hdesc_reset_to_original(h_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) udma_push_to_ring(uc, d->desc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) d->desc_idx = (d->desc_idx + 1) % d->sglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) memcpy(d->metadata, h_desc->epib, d->metadata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
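/*
 * TX completion towards a PDMA peer can be signaled before the peer has
 * actually drained all data to the device. Compare the peer's byte counter
 * with the channel's byte counter; if the peer is lagging, note the
 * outstanding residue and a timestamp so the drain can be polled later.
 */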
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) u32 peer_bcnt, bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Only TX towards PDMA is affected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (uc->config.ep_type == PSIL_EP_NATIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) uc->config.dir != DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Transfer is incomplete, store current residue and time stamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (peer_bcnt < bcnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) uc->tx_drain.residue = bcnt - peer_bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) uc->tx_drain.tstamp = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
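/*
 * Delayed work to wait for a MEM_TO_DEV transfer to be fully drained by the
 * PDMA peer. The next poll interval is estimated from the drain rate seen
 * since the previous check; if no progress is made the check is retried
 * after one second. Once drained, the descriptor is completed and the next
 * one is started.
 */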
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static void udma_check_tx_completion(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct udma_chan *uc = container_of(work, typeof(*uc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) tx_drain.work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) bool desc_done = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) u32 residue_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ktime_t time_diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) unsigned long delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (uc->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* Get previous residue and time stamp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) residue_diff = uc->tx_drain.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) time_diff = uc->tx_drain.tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * Get current residue and time stamp or see if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * transfer is complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) desc_done = udma_is_desc_really_done(uc, uc->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!desc_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * Find the time delta and residue delta w.r.t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * previous poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) time_diff = ktime_sub(uc->tx_drain.tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) time_diff) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) residue_diff -= uc->tx_drain.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (residue_diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /*
				 * Try to guess when we should check
				 * next time by calculating the rate at
				 * which data is being drained at the
				 * peer device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) delay = (time_diff / residue_diff) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) uc->tx_drain.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* No progress, check again in 1 second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) schedule_delayed_work(&uc->tx_drain.work, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) usleep_range(ktime_to_us(delay),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ktime_to_us(delay) + 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (uc->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct udma_desc *d = uc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) uc->bcnt += d->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) udma_start(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) vchan_cookie_complete(&d->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
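/*
 * Interrupt handler for the ring completion interrupt: pop the completed
 * descriptor's address from the ring, handle teardown completion markers,
 * and either run the cyclic callback, complete the descriptor (starting the
 * next one), or defer to the TX drain work if the peer is not done yet.
 */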
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static irqreturn_t udma_ring_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct udma_chan *uc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dma_addr_t paddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (udma_pop_from_ring(uc, &paddr) || !paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_lock_irqsave(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Teardown completion message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (cppi5_desc_is_tdcm(paddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) complete_all(&uc->teardown_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (uc->terminated_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) udma_desc_free(&uc->terminated_desc->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) uc->terminated_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (!uc->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) udma_start(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) d = udma_udma_desc_from_paddr(uc, paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) d->desc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (desc_paddr != paddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dev_err(uc->ud->dev, "not matching descriptors!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (d == uc->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* active descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (uc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) udma_cyclic_packet_elapsed(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) vchan_cyclic_callback(&d->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (udma_is_desc_really_done(uc, d)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) uc->bcnt += d->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) udma_start(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) vchan_cookie_complete(&d->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) schedule_delayed_work(&uc->tx_drain.work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * terminated descriptor, mark the descriptor as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * completed to update the channel's cookie marker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) dma_cookie_complete(&d->vd.tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) spin_unlock_irqrestore(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
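/*
 * Interrupt handler for UDMA events (TR mode): advance the TR index and
 * either run the cyclic callback or complete the descriptor and start the
 * next one queued on the channel.
 */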
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static irqreturn_t udma_udma_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct udma_chan *uc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) spin_lock_irqsave(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) d = uc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (d) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) d->tr_idx = (d->tr_idx + 1) % d->sglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (uc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) vchan_cyclic_callback(&d->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* TODO: figure out the real amount of data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) uc->bcnt += d->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) udma_start(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) vchan_cookie_complete(&d->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) spin_unlock_irqrestore(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * @ud: UDMA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * @from: Start the search from this flow id number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * @cnt: Number of consecutive flow ids to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) *
 * Allocate a range of RX flow ids for future use. These flows can be requested
 * only by explicit flow id number. If @from is set to -1 it will try to find
 * the first free range. If @from is a positive value it will force allocation
 * only of the specified range of flows.
 *
 * Returns -ENOMEM if no free range can be found,
 * -EEXIST if the requested range is busy,
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int start, tmp_from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) tmp_from = from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (tmp_from < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and are accessible only by id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (tmp_from < ud->rchan_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (tmp_from + cnt > ud->rflow_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) ud->rflow_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) start = bitmap_find_next_zero_area(tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ud->rflow_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) tmp_from, cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (start >= ud->rflow_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (from >= 0 && start != from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (from < ud->rchan_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (from + cnt > ud->rflow_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /*
	 * An attempt to request an rflow by ID can be made for any rflow
	 * that is not in use, with the assumption that the caller knows
	 * what it is doing. TI-SCI FW will perform an additional permission
	 * check anyway, so it's safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (id < 0 || id >= ud->rflow_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (test_bit(id, ud->rflow_in_use))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* GP rflow has to be allocated first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (!test_bit(id, ud->rflow_gp_map) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) !test_bit(id, ud->rflow_gp_map_allocated))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) dev_dbg(ud->dev, "get rflow%d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) set_bit(id, ud->rflow_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return &ud->rflows[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!test_bit(rflow->id, ud->rflow_in_use)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) clear_bit(rflow->id, ud->rflow_in_use);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
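/*
 * Generate __udma_reserve_tchan() and __udma_reserve_rchan(): reserve the
 * requested channel id if one is given, otherwise search for a free channel
 * starting from the first index of the requested throughput level (capped
 * at the highest level the device supports).
 */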
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) #define UDMA_RESERVE_RESOURCE(res) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) enum udma_tp_level tpl, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) int id) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (id >= 0) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (test_bit(id, ud->res##_map)) { \
			dev_err(ud->dev, "%s%d is in use\n", #res, id); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return ERR_PTR(-ENOENT); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) } else { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int start; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (tpl >= ud->tpl_levels) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) tpl = ud->tpl_levels - 1; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) start = ud->tpl_start_idx[tpl]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) start); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (id == ud->res##_cnt) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return ERR_PTR(-ENOENT); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) set_bit(id, ud->res##_map); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return &ud->res##s[id]; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) UDMA_RESERVE_RESOURCE(tchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) UDMA_RESERVE_RESOURCE(rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int udma_get_tchan(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (uc->tchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) uc->id, uc->tchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return PTR_ERR_OR_ZERO(uc->tchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static int udma_get_rchan(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (uc->rchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) uc->id, uc->rchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return PTR_ERR_OR_ZERO(uc->rchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
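/*
 * MEM_TO_MEM transfers need a tchan and an rchan with the same index.
 * Search for such a pair, preferring the highest throughput level channels.
 */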
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static int udma_get_chan_pair(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) int chan_id, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) uc->id, uc->tchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (uc->tchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) uc->id, uc->tchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) } else if (uc->rchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) uc->id, uc->rchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* Can be optimized, but let's have it like this for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) end = min(ud->tchan_cnt, ud->rchan_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) for (; chan_id < end; chan_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (!test_bit(chan_id, ud->tchan_map) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) !test_bit(chan_id, ud->rchan_map))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (chan_id == end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) set_bit(chan_id, ud->tchan_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) set_bit(chan_id, ud->rchan_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) uc->tchan = &ud->tchans[chan_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) uc->rchan = &ud->rchans[chan_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static int udma_get_rflow(struct udma_chan *uc, int flow_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (!uc->rchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (uc->rflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) uc->id, uc->rflow->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) uc->rflow = __udma_get_rflow(ud, flow_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return PTR_ERR_OR_ZERO(uc->rflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void udma_put_rchan(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (uc->rchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) uc->rchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) clear_bit(uc->rchan->id, ud->rchan_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) uc->rchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static void udma_put_tchan(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (uc->tchan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) uc->tchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) clear_bit(uc->tchan->id, ud->tchan_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) uc->tchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static void udma_put_rflow(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (uc->rflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) uc->rflow->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) __udma_put_rflow(ud, uc->rflow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) uc->rflow = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static void udma_free_tx_resources(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!uc->tchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) k3_ringacc_ring_free(uc->tchan->t_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) k3_ringacc_ring_free(uc->tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) uc->tchan->t_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) uc->tchan->tc_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) udma_put_tchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
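/*
 * Reserve a tchan and request its transmit and transmit-completion rings as
 * a pair, then configure both rings for message mode with 8-byte elements.
 */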
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static int udma_alloc_tx_resources(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct k3_ring_cfg ring_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ret = udma_get_tchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) &uc->tchan->t_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) &uc->tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) goto err_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) memset(&ring_cfg, 0, sizeof(ring_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) goto err_ringcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) err_ringcfg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) k3_ringacc_ring_free(uc->tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) uc->tchan->tc_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) k3_ringacc_ring_free(uc->tchan->t_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) uc->tchan->t_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) err_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) udma_put_tchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static void udma_free_rx_resources(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (!uc->rchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (uc->rflow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct udma_rflow *rflow = uc->rflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) k3_ringacc_ring_free(rflow->fd_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) k3_ringacc_ring_free(rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) rflow->fd_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) rflow->r_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) udma_put_rflow(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) udma_put_rchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
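/*
 * Reserve an rchan and, for slave transfers, the default rflow matching the
 * rchan id, then request and configure the free-descriptor and receive
 * rings. The fd_ring_id offset assumes that the rflow rings follow the
 * tchan and echan rings in the ring accelerator numbering.
 */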
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static int udma_alloc_rx_resources(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct k3_ring_cfg ring_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct udma_rflow *rflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) int fd_ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) ret = udma_get_rchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /* For MEM_TO_MEM we don't need rflow or rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (uc->config.dir == DMA_MEM_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ret = udma_get_rflow(uc, uc->rchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) goto err_rflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) rflow = uc->rflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) &rflow->fd_ring, &rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) goto err_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) memset(&ring_cfg, 0, sizeof(ring_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (uc->config.pkt_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) ring_cfg.size = SG_MAX_SEGMENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) goto err_ringcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) err_ringcfg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) k3_ringacc_ring_free(rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) rflow->r_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) k3_ringacc_ring_free(rflow->fd_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) rflow->fd_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) err_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) udma_put_rflow(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) err_rflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) udma_put_rchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
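/*
 * Fields marked as valid in the TI-SCI channel configuration requests sent
 * to the resource manager firmware below.
 */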
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) #define TISCI_TCHAN_VALID_PARAMS ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) #define TISCI_RCHAN_VALID_PARAMS ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
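/*
 * Configure the tchan/rchan pair for memory-to-memory (block copy) use via
 * TI-SCI: both ends use the third-party block-copy channel type and both
 * queue their completions to the tchan's completion ring.
 */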
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct udma_tchan *tchan = uc->tchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct udma_rchan *rchan = uc->rchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /* Non synchronized - mem to mem type of transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) req_tx.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) req_tx.index = tchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) req_tx.txcq_qnum = tc_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) req_tx.tx_atype = ud->atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) req_rx.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) req_rx.index = rchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) req_rx.rxcq_qnum = tc_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) req_rx.rx_atype = ud->atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
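/*
 * Configure the tchan via TI-SCI for slave use: packet mode vs TR mode
 * selects the channel type and the descriptor fetch size.
 */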
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static int udma_tisci_tx_channel_config(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct udma_tchan *tchan = uc->tchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) u32 mode, fetch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (uc->config.pkt_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) uc->config.psd_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) fetch_size = sizeof(struct cppi5_desc_hdr_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) req_tx.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) req_tx.index = tchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) req_tx.tx_chan_type = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) req_tx.tx_supr_tdpkt = uc->config.notdpkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) req_tx.tx_fetch_size = fetch_size >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) req_tx.txcq_qnum = tc_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) req_tx.tx_atype = uc->config.atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
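/*
 * Configure the rchan and its default receive flow via TI-SCI: the flow is
 * pointed at the free-descriptor and receive rings and the descriptor tag
 * selectors are programmed from the UDMA_RFLOW_* defaults.
 */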
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) static int udma_tisci_rx_channel_config(struct udma_chan *uc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct udma_dev *ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct udma_rchan *rchan = uc->rchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) u32 mode, fetch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (uc->config.pkt_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) uc->config.psd_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) fetch_size = sizeof(struct cppi5_desc_hdr_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) req_rx.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) req_rx.index = rchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) req_rx.rx_fetch_size = fetch_size >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) req_rx.rxcq_qnum = rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) req_rx.rx_chan_type = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) req_rx.rx_atype = uc->config.atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) flow_req.valid_params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) flow_req.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) flow_req.flow_index = rchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (uc->config.needs_epib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) flow_req.rx_einfo_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) flow_req.rx_einfo_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (uc->config.psd_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) flow_req.rx_psinfo_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) flow_req.rx_psinfo_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) flow_req.rx_error_handling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) flow_req.rx_dest_qnum = rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) flow_req.rx_fdq0_sz0_qnum = fd_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) flow_req.rx_fdq1_qnum = fd_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) flow_req.rx_fdq2_qnum = fd_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) flow_req.rx_fdq3_qnum = fd_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) static int udma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct udma_dev *ud = to_udma_dev(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) const struct udma_soc_data *soc_data = ud->soc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct k3_ring *irq_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) u32 irq_udma_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) uc->use_dma_pool = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* in case of MEM_TO_MEM we have maximum of two TRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (uc->config.dir == DMA_MEM_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) uc->config.hdesc_size = cppi5_trdesc_calc_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) sizeof(struct cppi5_tr_type15_t), 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) uc->config.pkt_mode = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (uc->use_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) uc->config.hdesc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) ud->desc_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (!uc->hdesc_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) dev_err(ud->ddev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) "Descriptor pool allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) uc->use_dma_pool = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * Make sure that the completion is in a known state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * No teardown, the channel is idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) reinit_completion(&uc->teardown_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) complete_all(&uc->teardown_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) uc->state = UDMA_CHAN_IS_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) switch (uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) case DMA_MEM_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /* Non synchronized - mem to mem type of transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ret = udma_get_chan_pair(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ret = udma_alloc_tx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) udma_put_rchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) ret = udma_alloc_rx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) udma_free_tx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
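/*
 * For memory to memory the paired channels are looped back to each other
 * over PSI-L: the source thread is the tchan's thread and the destination
 * thread is the rchan's thread marked with the destination thread offset.
 */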
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) uc->config.src_thread = ud->psil_base + uc->tchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) K3_PSIL_DST_THREAD_ID_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) irq_ring = uc->tchan->tc_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) irq_udma_idx = uc->tchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) ret = udma_tisci_m2m_channel_config(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* Slave transfer synchronized - mem to dev (TX) transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ret = udma_alloc_tx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) uc->config.src_thread = ud->psil_base + uc->tchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) uc->config.dst_thread = uc->config.remote_thread_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) irq_ring = uc->tchan->tc_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) irq_udma_idx = uc->tchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ret = udma_tisci_tx_channel_config(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* Slave transfer synchronized - dev to mem (RX) transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) ret = udma_alloc_rx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) uc->config.src_thread = uc->config.remote_thread_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) K3_PSIL_DST_THREAD_ID_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) irq_ring = uc->rflow->r_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) ret = udma_tisci_rx_channel_config(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /* Cannot happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) __func__, uc->id, uc->config.dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) goto err_cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) /* check if the channel configuration was successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) goto err_res_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (udma_is_chan_running(uc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) udma_reset_chan(uc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (udma_is_chan_running(uc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) goto err_res_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /* PSI-L pairing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) uc->config.src_thread, uc->config.dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) goto err_res_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) uc->psil_paired = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (uc->irq_num_ring <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) k3_ringacc_get_ring_id(irq_ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) goto err_psi_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) IRQF_TRIGGER_HIGH, uc->name, uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) goto err_irq_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) /* Event from UDMA (TR events) only needed for slave TR mode channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) irq_udma_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (uc->irq_num_udma <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) irq_udma_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) free_irq(uc->irq_num_ring, uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) goto err_irq_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) uc->name, uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) free_irq(uc->irq_num_ring, uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) goto err_irq_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) uc->irq_num_udma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) udma_reset_rings(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) err_irq_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) uc->irq_num_ring = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) uc->irq_num_udma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) err_psi_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) uc->psil_paired = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) err_res_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) udma_free_tx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) udma_free_rx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) err_cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) udma_reset_uchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (uc->use_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) dma_pool_destroy(uc->hdesc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) uc->use_dma_pool = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static int udma_slave_config(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct dma_slave_config *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) size_t tr_size, int tr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) enum dma_transfer_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) struct udma_hwdesc *hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct cppi5_desc_hdr_t *tr_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) u32 reload_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) u32 ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) switch (tr_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /* We have only one descriptor containing multiple TRs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) d->sglen = tr_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) d->hwdesc_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) hwdesc = &d->hwdesc[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) /* Allocate memory for DMA ring descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) if (uc->use_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) hwdesc->cppi5_desc_size = uc->config.hdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) GFP_NOWAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) &hwdesc->cppi5_desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) tr_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) uc->ud->desc_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) &hwdesc->cppi5_desc_paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (!hwdesc->cppi5_desc_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) /* Start of the TR req records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /* Start address of the TR response array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) tr_desc = hwdesc->cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (uc->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) cppi5_desc_set_pktids(tr_desc, uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) CPPI5_INFO1_DESC_FLOWID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) * udma_get_tr_counters - calculate TR counters for a given length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * @len: Length of the transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * @align_to: Preferred alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * @tr0_cnt0: First TR icnt0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * @tr0_cnt1: First TR icnt1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * @tr1_cnt0: Second (if used) TR icnt0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * For len >= SZ_64K two TRs are used in a simple way:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * Second TR: the remaining length (tr1_cnt0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Returns the number of TRs the length needs (1 or 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * -EINVAL if the length cannot be supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static int udma_get_tr_counters(size_t len, unsigned long align_to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (len < SZ_64K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) *tr0_cnt0 = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) *tr0_cnt1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
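/*
 * Worked illustration (values picked for this comment, not taken from the
 * original source): len = 200000 with a 4 byte aligned address
 * (align_to = 2) gives tr0_cnt0 = 65532, tr0_cnt1 = 3 and tr1_cnt0 = 3404,
 * i.e. 3 * 65532 + 3404 = 200000 bytes split over two TRs.
 */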
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (align_to > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) align_to = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) realign:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) *tr0_cnt0 = SZ_64K - BIT(align_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (len / *tr0_cnt0 >= SZ_64K) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) if (align_to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) align_to--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) goto realign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) *tr0_cnt1 = len / *tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) *tr1_cnt0 = len % *tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static struct udma_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) unsigned int sglen, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) unsigned long tx_flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) struct scatterlist *sgent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) struct cppi5_tr_type1_t *tr_req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) size_t tr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) int num_tr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) int tr_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /* estimate the number of TRs we will need */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) for_each_sg(sgl, sgent, sglen, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (sg_dma_len(sgent) < SZ_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) num_tr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) num_tr += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) /* Now allocate and set up the descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) tr_size = sizeof(struct cppi5_tr_type1_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) d->sglen = sglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) tr_req = d->hwdesc[0].tr_req_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) for_each_sg(sgl, sgent, sglen, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) dma_addr_t sg_addr = sg_dma_address(sgent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if (num_tr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) dev_err(uc->ud->dev, "size %u is not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) sg_dma_len(sgent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) udma_free_hwdesc(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) tr_req[tr_idx].addr = sg_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) tr_req[tr_idx].icnt0 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) tr_req[tr_idx].icnt1 = tr0_cnt1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) tr_req[tr_idx].dim1 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) tr_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
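/*
 * The optional second TR moves the remaining tr1_cnt0 bytes that did not
 * fit into the icnt0 * icnt1 block of the first TR.
 */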
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (num_tr == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) false, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) cppi5_tr_csf_set(&tr_req[tr_idx].flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) tr_req[tr_idx].icnt0 = tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) tr_req[tr_idx].icnt1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) tr_req[tr_idx].dim1 = tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) tr_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) d->residue += sg_dma_len(sgent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) enum dma_slave_buswidth dev_width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) u16 elcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (uc->config.ep_type != PSIL_EP_PDMA_XY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* Bus width translates to the element size (ES) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) switch (dev_width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) case DMA_SLAVE_BUSWIDTH_1_BYTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) d->static_tr.elsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) case DMA_SLAVE_BUSWIDTH_2_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) d->static_tr.elsize = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case DMA_SLAVE_BUSWIDTH_3_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) d->static_tr.elsize = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) case DMA_SLAVE_BUSWIDTH_4_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) d->static_tr.elsize = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) case DMA_SLAVE_BUSWIDTH_8_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) d->static_tr.elsize = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) default: /* not reached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) d->static_tr.elcnt = elcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * PDMA must close the packet when the channel is in packet mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * For TR mode when the channel is not cyclic we also need PDMA to close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * the packet, otherwise the transfer will stall because PDMA holds on to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * the data it has received from the peripheral.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (uc->config.pkt_mode || !uc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) unsigned int div = dev_width * elcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
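/*
 * div is the size of one full burst in bytes; bstcnt is the number of
 * bursts needed for the packet (for cyclic transfers: for one period).
 */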
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (uc->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) d->static_tr.bstcnt = d->residue / d->sglen / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) d->static_tr.bstcnt = d->residue / div;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (uc->config.dir == DMA_DEV_TO_MEM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) d->static_tr.bstcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) static struct udma_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) unsigned int sglen, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) unsigned long tx_flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) struct scatterlist *sgent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct cppi5_host_desc_t *h_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) u32 ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) d->sglen = sglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) d->hwdesc_count = sglen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) for_each_sg(sgl, sgent, sglen, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) struct udma_hwdesc *hwdesc = &d->hwdesc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) dma_addr_t sg_addr = sg_dma_address(sgent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct cppi5_host_desc_t *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) size_t sg_len = sg_dma_len(sgent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) GFP_NOWAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) &hwdesc->cppi5_desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (!hwdesc->cppi5_desc_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) dev_err(uc->ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) "descriptor%d allocation failed\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) udma_free_hwdesc(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) d->residue += sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) hwdesc->cppi5_desc_size = uc->config.hdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) desc = hwdesc->cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
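/*
 * Only the first descriptor of the chain carries the packet level
 * information (packet and flow ID, return ring); the rest are linked in
 * as plain host buffer descriptors.
 */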
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) cppi5_hdesc_init(desc, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) /* Flow and Packet ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) cppi5_desc_set_pktids(&desc->hdr, uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) CPPI5_INFO1_DESC_FLOWID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) cppi5_hdesc_reset_hbdesc(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) /* attach the sg buffer to the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /* Attach link as host buffer descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (h_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) cppi5_hdesc_link_hbdesc(h_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) hwdesc->cppi5_desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (dir == DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) h_desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (d->residue >= SZ_4M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) dev_err(uc->ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) "%s: Transfer size %u is over the supported 4M range\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) __func__, d->residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) udma_free_hwdesc(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) h_desc = d->hwdesc[0].cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) cppi5_hdesc_set_pktlen(h_desc, d->residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) void *data, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) struct udma_desc *d = to_udma_desc(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) struct udma_chan *uc = to_udma_chan(desc->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct cppi5_host_desc_t *h_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) u32 psd_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (!uc->config.pkt_mode || !uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (!data || len > uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) h_desc = d->hwdesc[0].cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (d->dir == DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) memcpy(h_desc->epib, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (uc->config.needs_epib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) d->metadata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) d->metadata_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (uc->config.needs_epib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) cppi5_hdesc_update_flags(h_desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) cppi5_hdesc_update_psdata_size(h_desc, psd_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) size_t *payload_len, size_t *max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct udma_desc *d = to_udma_desc(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) struct udma_chan *uc = to_udma_chan(desc->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) struct cppi5_host_desc_t *h_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (!uc->config.pkt_mode || !uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return ERR_PTR(-ENOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) h_desc = d->hwdesc[0].cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) *max_len = uc->config.metadata_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) return h_desc->epib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) size_t payload_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct udma_desc *d = to_udma_desc(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct udma_chan *uc = to_udma_chan(desc->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) struct cppi5_host_desc_t *h_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) u32 psd_size = payload_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) u32 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if (!uc->config.pkt_mode || !uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (payload_len > uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) h_desc = d->hwdesc[0].cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) if (uc->config.needs_epib) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) cppi5_hdesc_update_flags(h_desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) cppi5_hdesc_update_psdata_size(h_desc, psd_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) static struct dma_descriptor_metadata_ops metadata_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) .attach = udma_attach_metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) .get_ptr = udma_get_metadata_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) .set_len = udma_set_metadata_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) };
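/*
 * Illustrative client side use of the metadata ops (a sketch only; "desc",
 * "meta_buf" and "meta_len" are placeholders owned by the client driver):
 * attaching metadata through the generic dmaengine helper ends up in
 * udma_attach_metadata() above.
 *
 *	ret = dmaengine_desc_attach_metadata(desc, meta_buf, meta_len);
 */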
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) unsigned int sglen, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) unsigned long tx_flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) enum dma_slave_buswidth dev_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) u32 burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (dir != uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) dev_err(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) "%s: chan%d is for %s, not supporting %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) __func__, uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) dmaengine_get_direction_text(uc->config.dir),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) dmaengine_get_direction_text(dir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) dev_width = uc->cfg.src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) burst = uc->cfg.src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) } else if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) dev_width = uc->cfg.dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) burst = uc->cfg.dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (!burst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) burst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (uc->config.pkt_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) d->dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) d->desc_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) d->tr_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /* static TR for remote PDMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (udma_configure_statictr(uc, d, dev_width, burst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) dev_err(uc->ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) __func__, d->static_tr.bstcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) udma_free_hwdesc(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) d->vd.tx.metadata_ops = &metadata_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static struct udma_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) enum dma_transfer_direction dir, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) size_t tr_size, period_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct cppi5_tr_type1_t *tr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) unsigned int periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) int num_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) &tr0_cnt1, &tr1_cnt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (num_tr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) dev_err(uc->ud->dev, "size %zu is not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) /* Now allocate and set up the descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) tr_size = sizeof(struct cppi5_tr_type1_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) tr_req = d->hwdesc[0].tr_req_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) period_addr = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) for (i = 0; i < periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) int tr_idx = i * num_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) tr_req[tr_idx].addr = period_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) tr_req[tr_idx].icnt0 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) tr_req[tr_idx].icnt1 = tr0_cnt1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) tr_req[tr_idx].dim1 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (num_tr == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) cppi5_tr_csf_set(&tr_req[tr_idx].flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) tr_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) false, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) tr_req[tr_idx].icnt0 = tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) tr_req[tr_idx].icnt1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) tr_req[tr_idx].dim1 = tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
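/*
 * Suppress the per-period completion event on the period's last TR when
 * the client did not request interrupts.
 */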
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (!(flags & DMA_PREP_INTERRUPT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) cppi5_tr_csf_set(&tr_req[tr_idx].flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) period_addr += period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) static struct udma_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) enum dma_transfer_direction dir, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) u32 ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) int periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (period_len >= SZ_4M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) d->hwdesc_count = periods;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) /* TODO: re-check this... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) if (dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) for (i = 0; i < periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) struct udma_hwdesc *hwdesc = &d->hwdesc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) dma_addr_t period_addr = buf_addr + (period_len * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) struct cppi5_host_desc_t *h_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) GFP_NOWAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) &hwdesc->cppi5_desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (!hwdesc->cppi5_desc_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) dev_err(uc->ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) "descriptor %d allocation failed\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) udma_free_hwdesc(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) hwdesc->cppi5_desc_size = uc->config.hdesc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) h_desc = hwdesc->cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) cppi5_hdesc_init(h_desc, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) cppi5_hdesc_set_pktlen(h_desc, period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) /* Flow and Packet ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) CPPI5_INFO1_DESC_FLOWID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) /* attach each period to a new descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) cppi5_hdesc_attach_buf(h_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) period_addr, period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) period_addr, period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) return d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
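/*
 * device_prep_dma_cyclic callback. The slave configuration (address width and
 * burst) is taken for the requested direction, then the request is handed to
 * the packet-mode or TR-mode helper depending on how the channel is
 * configured. For PDMA (non-native PSI-L) endpoints the static TR parameters
 * are also set up; if the required burst count does not fit the StaticTR Z
 * field the descriptor is dropped.
 */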
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) size_t period_len, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) enum dma_slave_buswidth dev_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) u32 burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (dir != uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) dev_err(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) "%s: chan%d is for %s, not supporting %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) __func__, uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) dmaengine_get_direction_text(uc->config.dir),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) dmaengine_get_direction_text(dir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) uc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) dev_width = uc->cfg.src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) burst = uc->cfg.src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) } else if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) dev_width = uc->cfg.dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) burst = uc->cfg.dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (!burst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) burst = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (uc->config.pkt_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) d->sglen = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) d->dir = dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) d->residue = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) /* static TR for remote PDMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) if (udma_configure_statictr(uc, d, dev_width, burst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) dev_err(uc->ud->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) __func__, d->static_tr.bstcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) udma_free_hwdesc(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) kfree(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) d->vd.tx.metadata_ops = &metadata_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return vchan_tx_prep(&uc->vc, &d->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
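/*
 * device_prep_dma_memcpy callback: only valid on channels configured for
 * MEM_TO_MEM. The copy is expressed with at most two type 15 TRs, sized from
 * the length and the common alignment of source and destination
 * (__ffs(src | dest)). Intermediate events are suppressed and EOP is set on
 * the last TR, so completion is reported once for the whole transfer.
 */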
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) size_t len, unsigned long tx_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) struct cppi5_tr_type15_t *tr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) int num_tr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) size_t tr_size = sizeof(struct cppi5_tr_type15_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (uc->config.dir != DMA_MEM_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) dev_err(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) "%s: chan%d is for %s, not supporting %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) __func__, uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) dmaengine_get_direction_text(uc->config.dir),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) dmaengine_get_direction_text(DMA_MEM_TO_MEM));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) &tr0_cnt1, &tr1_cnt0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (num_tr < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) dev_err(uc->ud->dev, "size %zu is not supported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) d->dir = DMA_MEM_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) d->desc_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) d->tr_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) d->residue = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) tr_req = d->hwdesc[0].tr_req_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) tr_req[0].addr = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) tr_req[0].icnt0 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) tr_req[0].icnt1 = tr0_cnt1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) tr_req[0].icnt2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) tr_req[0].icnt3 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) tr_req[0].dim1 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) tr_req[0].daddr = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) tr_req[0].dicnt0 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) tr_req[0].dicnt1 = tr0_cnt1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) tr_req[0].dicnt2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) tr_req[0].dicnt3 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) tr_req[0].ddim1 = tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (num_tr == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) tr_req[1].icnt0 = tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) tr_req[1].icnt1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) tr_req[1].icnt2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) tr_req[1].icnt3 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) tr_req[1].dicnt0 = tr1_cnt0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) tr_req[1].dicnt1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) tr_req[1].dicnt2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) tr_req[1].dicnt3 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (uc->config.metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) d->vd.tx.metadata_ops = &metadata_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static void udma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) spin_lock_irqsave(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) /* If we have something pending and no active descriptor, start one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (vchan_issue_pending(&uc->vc) && !uc->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * start a descriptor if the channel is NOT [marked as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * terminating _and_ it is still running (teardown has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) * completed yet)].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) udma_is_chan_running(uc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) udma_start(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) spin_unlock_irqrestore(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
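/*
 * device_tx_status callback. For the descriptor currently on the hardware the
 * residue is estimated from the channel runtime byte counters (source byte
 * count for MEM_TO_DEV, byte count for DEV_TO_MEM); for non-native (PDMA)
 * endpoints the difference between the local and the peer counter is reported
 * as in-flight bytes. A channel that is no longer running is reported as
 * complete.
 */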
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) static enum dma_status udma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) spin_lock_irqsave(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (!udma_is_chan_running(uc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) ret = DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) ret = DMA_PAUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (ret == DMA_COMPLETE || !txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) u32 peer_bcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) u32 bcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) u32 residue = uc->desc->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) u32 delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (uc->desc->dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if (uc->config.ep_type != PSIL_EP_NATIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) peer_bcnt = udma_tchanrt_read(uc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) UDMA_CHAN_RT_PEER_BCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (bcnt > peer_bcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) delay = bcnt - peer_bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (uc->config.ep_type != PSIL_EP_NATIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) peer_bcnt = udma_rchanrt_read(uc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) UDMA_CHAN_RT_PEER_BCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (peer_bcnt > bcnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) delay = peer_bcnt - bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
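		/*
		 * uc->bcnt accumulates the bytes of descriptors that already
		 * completed on this channel; subtract it so only the progress
		 * of the current descriptor remains, then fold the result
		 * modulo the descriptor length to handle cyclic transfers
		 * where the hardware counter keeps incrementing.
		 */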
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) bcnt -= uc->bcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if (bcnt && !(bcnt % uc->desc->residue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) residue -= bcnt % uc->desc->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) ret = DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) dma_set_in_flight_bytes(txstate, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) ret = DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) spin_unlock_irqrestore(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
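/*
 * Pause is implemented at the PSI-L peer (UDMA_PEER_RT_EN_PAUSE) for the
 * slave directions and in the channel real-time control register for
 * MEM_TO_MEM channels; a channel without a configured direction cannot be
 * paused.
 */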
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) static int udma_pause(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) /* pause the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) switch (uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) UDMA_PEER_RT_EN_PAUSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) UDMA_PEER_RT_EN_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) UDMA_PEER_RT_EN_PAUSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) UDMA_PEER_RT_EN_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) case DMA_MEM_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) UDMA_CHAN_RT_CTL_PAUSE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) UDMA_CHAN_RT_CTL_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
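/* Resume clears the same pause bit that udma_pause() set. */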
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) static int udma_resume(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) /* resume the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) switch (uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) UDMA_PEER_RT_EN_PAUSE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) UDMA_PEER_RT_EN_PAUSE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) case DMA_MEM_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) UDMA_CHAN_RT_CTL_PAUSE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
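/*
 * device_terminate_all callback: stop the channel if it is running, detach
 * the active descriptor (kept as terminated_desc and flagged terminated so
 * its completion is not reported to the client), clear the paused state and
 * free every descriptor still sitting on the virtual channel lists outside
 * of the lock.
 */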
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) static int udma_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) spin_lock_irqsave(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (udma_is_chan_running(uc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) udma_stop(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (uc->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) uc->terminated_desc = uc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) uc->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) uc->terminated_desc->terminated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) cancel_delayed_work(&uc->tx_drain.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) uc->paused = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) vchan_get_all_descriptors(&uc->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) spin_unlock_irqrestore(&uc->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) vchan_dma_desc_free_list(&uc->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
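/*
 * device_synchronize callback: after draining the virtual channel, wait up to
 * one second for an ongoing teardown to complete (dumping channel state and
 * forcing a hard reset on timeout), then reset the channel and its rings and
 * flush the tx drain work.
 */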
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) static void udma_synchronize(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) unsigned long timeout = msecs_to_jiffies(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) vchan_synchronize(&uc->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (uc->state == UDMA_CHAN_IS_TERMINATING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) timeout = wait_for_completion_timeout(&uc->teardown_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) udma_dump_chan_stdata(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) udma_reset_chan(uc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) udma_reset_chan(uc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) if (udma_is_chan_running(uc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) cancel_delayed_work_sync(&uc->tx_drain.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) udma_reset_rings(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
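/*
 * Runs just before the client callback is invoked: fetches the EPIB/metadata
 * from the completed descriptor when metadata is in use and, for host
 * (packet mode) descriptors, derives the residue from the actual packet
 * length; a non-zero residue is reported as DMA_TRANS_ABORTED.
 */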
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) static void udma_desc_pre_callback(struct virt_dma_chan *vc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) struct virt_dma_desc *vd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) struct dmaengine_result *result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) struct udma_chan *uc = to_udma_chan(&vc->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) struct udma_desc *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (!vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) d = to_udma_desc(&vd->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (d->metadata_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) udma_fetch_epib(uc, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /* Provide residue information for the client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (cppi5_desc_get_type(desc_vaddr) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) result->residue = d->residue -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) cppi5_hdesc_get_pktlen(desc_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (result->residue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) result->result = DMA_TRANS_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) result->result = DMA_TRANS_NOERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) result->residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) result->result = DMA_TRANS_NOERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) * This tasklet handles the completion of a DMA descriptor by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) * calling its callback and freeing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) static void udma_vchan_complete(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) struct virt_dma_chan *vc = from_tasklet(vc, t, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) struct virt_dma_desc *vd, *_vd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) struct dmaengine_desc_callback cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) spin_lock_irq(&vc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) list_splice_tail_init(&vc->desc_completed, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) vd = vc->cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (vd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) vc->cyclic = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) dmaengine_desc_get_callback(&vd->tx, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) memset(&cb, 0, sizeof(cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) spin_unlock_irq(&vc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) udma_desc_pre_callback(vc, vd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) list_for_each_entry_safe(vd, _vd, &head, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) struct dmaengine_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) dmaengine_desc_get_callback(&vd->tx, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) list_del(&vd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) udma_desc_pre_callback(vc, vd, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) dmaengine_desc_callback_invoke(&cb, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) vchan_vdesc_fini(vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
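/*
 * device_free_chan_resources callback: terminate and reset the channel,
 * release the ring and UDMA event interrupts, drop the PSI-L pairing, free
 * the virtual channel together with the tx/rx channel resources and, when
 * packet mode was used, destroy the per-channel descriptor pool.
 */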
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) static void udma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) struct udma_dev *ud = to_udma_dev(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) udma_terminate_all(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) if (uc->terminated_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) udma_reset_chan(uc, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) udma_reset_rings(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) cancel_delayed_work_sync(&uc->tx_drain.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (uc->irq_num_ring > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) free_irq(uc->irq_num_ring, uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) uc->irq_num_ring = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (uc->irq_num_udma > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) free_irq(uc->irq_num_udma, uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) uc->irq_num_udma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) /* Release PSI-L pairing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (uc->psil_paired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) navss_psil_unpair(ud, uc->config.src_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) uc->config.dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) uc->psil_paired = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) vchan_free_chan_resources(&uc->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) tasklet_kill(&uc->vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) udma_free_tx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) udma_free_rx_resources(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) udma_reset_uchan(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) if (uc->use_dma_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) dma_pool_destroy(uc->hdesc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) uc->use_dma_pool = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) static struct platform_driver udma_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) struct udma_filter_param {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) int remote_thread_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) u32 atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
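/*
 * Filter function used by udma_of_xlate(): accept only channels that belong
 * to this driver, then configure the channel from the PSI-L endpoint
 * configuration of the requested remote thread. The transfer direction is
 * derived from the destination-thread bit of the thread ID; packet mode,
 * endpoint type and metadata sizes come from the endpoint configuration. If
 * no endpoint configuration exists, the channel is reset to MEM_TO_MEM and
 * the match is rejected.
 */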
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) struct udma_chan_config *ucc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) struct psil_endpoint_config *ep_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) struct udma_filter_param *filter_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) struct udma_chan *uc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) struct udma_dev *ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (chan->device->dev->driver != &udma_driver.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) ucc = &uc->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) ud = uc->ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) filter_param = param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (filter_param->atype > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) dev_err(ud->dev, "Invalid channel atype: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) filter_param->atype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) ucc->remote_thread_id = filter_param->remote_thread_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ucc->atype = filter_param->atype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) ucc->dir = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ucc->dir = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) ep_config = psil_get_ep_config(ucc->remote_thread_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) if (IS_ERR(ep_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) ucc->remote_thread_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) ucc->dir = DMA_MEM_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) ucc->remote_thread_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) ucc->atype = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) ucc->pkt_mode = ep_config->pkt_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) ucc->channel_tpl = ep_config->channel_tpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) ucc->notdpkt = ep_config->notdpkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) ucc->ep_type = ep_config->ep_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) if (ucc->ep_type != PSIL_EP_NATIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) const struct udma_match_data *match_data = ud->match_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) ucc->enable_acc32 = ep_config->pdma_acc32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (match_data->flags & UDMA_FLAG_PDMA_BURST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) ucc->enable_burst = ep_config->pdma_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) ucc->needs_epib = ep_config->needs_epib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) ucc->psd_size = ep_config->psd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) ucc->metadata_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) ucc->psd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (ucc->pkt_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) ucc->metadata_size, ud->desc_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
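/*
 * OF translation: the dma-spec carries the remote PSI-L thread ID and,
 * optionally, the ATYPE as a second cell. The actual channel selection is
 * done by udma_dma_filter_fn() above.
 */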
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) struct udma_dev *ud = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) dma_cap_mask_t mask = ud->ddev.cap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) struct udma_filter_param filter_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) filter_param.remote_thread_id = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (dma_spec->args_count == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) filter_param.atype = dma_spec->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) filter_param.atype = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) ofdma->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) if (!chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) dev_err(ud->dev, "failed to get channel in %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) static struct udma_match_data am654_main_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) .psil_base = 0x1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) .enable_memcpy_support = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) .statictr_z_mask = GENMASK(11, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) static struct udma_match_data am654_mcu_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) .psil_base = 0x6000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) .enable_memcpy_support = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) .statictr_z_mask = GENMASK(11, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) static struct udma_match_data j721e_main_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) .psil_base = 0x1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) .enable_memcpy_support = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) .statictr_z_mask = GENMASK(23, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) static struct udma_match_data j721e_mcu_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) .psil_base = 0x6000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) .statictr_z_mask = GENMASK(23, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) static const struct of_device_id udma_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) .compatible = "ti,am654-navss-main-udmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) .data = &am654_main_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) .compatible = "ti,am654-navss-mcu-udmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) .data = &am654_mcu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) .compatible = "ti,j721e-navss-main-udmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) .data = &j721e_main_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) .compatible = "ti,j721e-navss-mcu-udmap",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) .data = &j721e_mcu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) { /* Sentinel */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) static struct udma_soc_data am654_soc_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) .rchan_oes_offset = 0x200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) static struct udma_soc_data j721e_soc_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) .rchan_oes_offset = 0x400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) static struct udma_soc_data j7200_soc_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) .rchan_oes_offset = 0x80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) static const struct soc_device_attribute k3_soc_devices[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) { .family = "AM65X", .data = &am654_soc_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) { .family = "J721E", .data = &j721e_soc_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) { .family = "J7200", .data = &j7200_soc_data },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) { /* sentinel */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) for (i = 0; i < MMR_LAST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (IS_ERR(ud->mmrs[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) return PTR_ERR(ud->mmrs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
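/*
 * Discover and reserve the DMA resources: the channel and flow counts are
 * read from the GCFG capability registers, throughput-level boundaries are
 * set up (with fixed values for the AM654 instances), the rflows matching
 * rchan IDs are reserved as default flows, and the tchan/rchan/rflow ranges
 * granted by TISCI are applied before the ring completion interrupts are
 * allocated from the INTA MSI domain.
 */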
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) static int udma_setup_resources(struct udma_dev *ud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct device *dev = ud->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) int ch_count, ret, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) u32 cap2, cap3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) struct ti_sci_resource_desc *rm_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) struct ti_sci_resource *rm_res, irq_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static const char * const range_names[] = { "ti,sci-rm-range-tchan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) "ti,sci-rm-range-rchan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) "ti,sci-rm-range-rflow" };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) ch_count = ud->tchan_cnt + ud->rchan_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) /* Set up the throughput level start indexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (of_device_is_compatible(dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) "ti,am654-navss-main-udmap")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) ud->tpl_levels = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) ud->tpl_start_idx[0] = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) } else if (of_device_is_compatible(dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) "ti,am654-navss-mcu-udmap")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) ud->tpl_levels = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) ud->tpl_start_idx[0] = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) ud->tpl_levels = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) ud->tpl_levels = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) ud->tpl_levels = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) sizeof(unsigned long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) sizeof(unsigned long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) ud->rflow_gp_map_allocated = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) BITS_TO_LONGS(ud->rflow_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) !ud->rflows || !ud->rflow_in_use)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) * RX flows with the same Ids as RX channels are reserved to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) * as default flows if remote HW can't generate flow_ids. Those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) * RX flows can be requested only explicitly by id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) /* by default no GP rflows are assigned to Linux */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) /* Get resource ranges from tisci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) for (i = 0; i < RM_RANGE_LAST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) tisci_rm->rm_ranges[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) tisci_rm->tisci_dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) (char *)range_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->tchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
		irq_res.sets = rm_res->sets;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++) {
			rm_desc = &rm_res->desc[i];
			bitmap_clear(ud->rchan_map, rm_desc->start,
				     rm_desc->num);
			dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
				rm_desc->start, rm_desc->num);
		}
		irq_res.sets += rm_res->sets;
	}

	/*
	 * Describe the event ranges for the MSI allocation: one entry per
	 * tchan/rchan range, with rchan events shifted by rchan_oes_offset.
	 */
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = ud->soc_data->rchan_oes_offset;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						ud->soc_data->rchan_oes_offset;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) kfree(irq_res.desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /* GP rflow ranges */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) if (IS_ERR(rm_res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) /* all gp flows are assigned exclusively to Linux */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) ud->rflow_cnt - ud->rchan_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) for (i = 0; i < rm_res->sets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) rm_desc = &rm_res->desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) bitmap_clear(ud->rflow_gp_map, rm_desc->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) rm_desc->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) rm_desc->start, rm_desc->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
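	/*
	 * Bits still set in the channel maps were not granted to this host;
	 * subtract them to get the number of usable channels.
	 */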
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) if (!ch_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) if (!ud->channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) ch_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) ud->rflow_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) return ch_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) static int udma_setup_rx_flush(struct udma_dev *ud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) struct udma_rx_flush *rx_flush = &ud->rx_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) struct cppi5_desc_hdr_t *tr_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) struct cppi5_tr_type1_t *tr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) struct cppi5_host_desc_t *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) struct device *dev = ud->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) struct udma_hwdesc *hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) size_t tr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /* Allocate 1K buffer for discarded data on RX channel teardown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) rx_flush->buffer_size = SZ_1K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) if (!rx_flush->buffer_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
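	/*
	 * The flushed data is never read back by the CPU, so a DMA_TO_DEVICE
	 * mapping is sufficient even though the hardware writes the buffer.
	 */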
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) rx_flush->buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) if (dma_mapping_error(dev, rx_flush->buffer_paddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) /* Set up descriptor to be used for TR mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) hwdesc = &rx_flush->hwdescs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) tr_size = sizeof(struct cppi5_tr_type1_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) ud->desc_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (!hwdesc->cppi5_desc_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) /* Start of the TR req records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) /* Start address of the TR response array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) tr_desc = hwdesc->cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) cppi5_desc_set_retpolicy(tr_desc, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) tr_req = hwdesc->tr_req_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
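	/* A single TR that lands the flushed RX data in the scratch buffer */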
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) tr_req->addr = rx_flush->buffer_paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) tr_req->icnt0 = rx_flush->buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) tr_req->icnt1 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) /* Set up descriptor to be used for packet mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) hwdesc = &rx_flush->hwdescs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) CPPI5_INFO0_HDESC_EPIB_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) ud->desc_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (!hwdesc->cppi5_desc_vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) hwdesc->cppi5_desc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) desc = hwdesc->cppi5_desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) cppi5_hdesc_init(desc, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
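	/* Attach the flush buffer as both the original and current buffer */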
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) cppi5_hdesc_attach_buf(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) rx_flush->buffer_paddr, rx_flush->buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) rx_flush->buffer_paddr, rx_flush->buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) static void udma_dbg_summary_show_chan(struct seq_file *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) struct udma_chan *uc = to_udma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) struct udma_chan_config *ucc = &uc->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) seq_printf(s, " %-13s| %s", dma_chan_name(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) chan->dbg_client_name ?: "in-use");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) switch (uc->config.dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) case DMA_MEM_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) ucc->src_thread, ucc->dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) ucc->src_thread, ucc->dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) ucc->src_thread, ucc->dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) default:
		seq_puts(s, ")\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_puts(s, "PSI-L Native");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) if (ucc->metadata_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (ucc->psd_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_puts(s, " ]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) } else {
		seq_puts(s, "PDMA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) if (ucc->enable_acc32 || ucc->enable_burst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) seq_printf(s, "[%s%s ]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) ucc->enable_acc32 ? " ACC32" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) ucc->enable_burst ? " BURST" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) static void udma_dbg_summary_show(struct seq_file *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) struct dma_device *dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) list_for_each_entry(chan, &dma_dev->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (chan->client_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) udma_dbg_summary_show_chan(s, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) #endif /* CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) static int udma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) struct device_node *navss_node = pdev->dev.parent->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) const struct soc_device_attribute *soc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) struct udma_dev *ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) int ch_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) if (ret)
		dev_err(dev, "failed to set dma mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) if (!ud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) ret = udma_get_mmrs(pdev, ud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) if (IS_ERR(ud->tisci_rm.tisci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) return PTR_ERR(ud->tisci_rm.tisci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) &ud->tisci_rm.tisci_dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) }
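	/* OF-probed platform devices get no numeric ID; reuse the TISCI device ID */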
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) pdev->id = ud->tisci_rm.tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) &ud->tisci_rm.tisci_navss_dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (!ret && ud->atype > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) dev_err(dev, "Invalid atype: %u\n", ud->atype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) if (IS_ERR(ud->ringacc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) return PTR_ERR(ud->ringacc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) DOMAIN_BUS_TI_SCI_INTA_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) if (!dev->msi_domain) {
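		/* The TI-SCI INTA MSI domain may not be probed yet, try again later */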
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) dev_err(dev, "Failed to get MSI domain\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) match = of_match_node(udma_of_match, dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) if (!match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) dev_err(dev, "No compatible match found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) ud->match_data = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) soc = soc_device_match(k3_soc_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (!soc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) dev_err(dev, "No compatible SoC found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) ud->soc_data = soc->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) ud->ddev.device_config = udma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) ud->ddev.device_issue_pending = udma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) ud->ddev.device_tx_status = udma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) ud->ddev.device_pause = udma_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) ud->ddev.device_resume = udma_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) ud->ddev.device_terminate_all = udma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) ud->ddev.device_synchronize = udma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) ud->ddev.dbg_summary_show = udma_dbg_summary_show;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) ud->ddev.device_free_chan_resources = udma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) DESC_METADATA_ENGINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) if (ud->match_data->enable_memcpy_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) ud->ddev.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) ud->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) ud->psil_base = ud->match_data->psil_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) INIT_LIST_HEAD(&ud->ddev.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) INIT_LIST_HEAD(&ud->desc_to_purge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) ch_count = udma_setup_resources(ud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (ch_count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) return ch_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) spin_lock_init(&ud->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) INIT_WORK(&ud->purge_work, udma_purge_desc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)
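	/* Align descriptors to at least 64 bytes, or a full cache line if that is larger */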
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) ud->desc_align = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) if (ud->desc_align < dma_get_cache_alignment())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) ud->desc_align = dma_get_cache_alignment();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) ret = udma_setup_rx_flush(ud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630)
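	/* Per-channel real-time register regions are 0x1000 bytes apart */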
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) for (i = 0; i < ud->tchan_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) struct udma_tchan *tchan = &ud->tchans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) tchan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) for (i = 0; i < ud->rchan_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) struct udma_rchan *rchan = &ud->rchans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) rchan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) for (i = 0; i < ud->rflow_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) struct udma_rflow *rflow = &ud->rflows[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) rflow->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
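	/*
	 * Set up the logical channels; the actual tchan/rchan/rflow resources
	 * are picked when a client allocates the channel.
	 */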
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) for (i = 0; i < ch_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) struct udma_chan *uc = &ud->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) uc->ud = ud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) uc->vc.desc_free = udma_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) uc->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) uc->tchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) uc->rchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) uc->config.remote_thread_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) uc->config.dir = DMA_MEM_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) dev_name(dev), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) vchan_init(&uc->vc, &ud->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) /* Use custom vchan completion handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) tasklet_setup(&uc->vc.task, udma_vchan_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) init_completion(&uc->teardown_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) ret = dma_async_device_register(&ud->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) platform_set_drvdata(pdev, ud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) dev_err(dev, "failed to register of_dma controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) dma_async_device_unregister(&ud->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) static struct platform_driver udma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) .name = "ti-udma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) .of_match_table = udma_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) .suppress_bind_attrs = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) .probe = udma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) builtin_platform_driver(udma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) /* Private interfaces to UDMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) #include "k3-udma-private.c"