// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

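/*
 * State shared by the TX and RX glue channels: the backing UDMA device
 * and its TI-SCI resource-management handle, the ring accelerator, the
 * paired PSI-L source/destination threads, and the CPPI5 host descriptor
 * geometry (EPIB/psdata/swdata sizes) derived from the PSI-L endpoint
 * configuration.
 */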
struct k3_udma_glue_common {
	struct device *dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
	u32 atype;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

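/* poll budget for a synchronous channel teardown, in 1 us steps */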
#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
						       "ti,ringacc");
	if (IS_ERR(common->ringacc))
		return PTR_ERR(common->ringacc);

	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
		const char *name, struct k3_udma_glue_common *common,
		bool tx_chn)
{
	struct psil_endpoint_config *ep_config;
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		common->atype = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(ep_config);
		goto out_put_spec;
	}

	common->epib = ep_config->needs_epib;
	common->psdata_size = ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

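/*
 * Push the static TX channel configuration to system firmware over
 * TI-SCI: packet (PBRR) mode with the per-channel filter/pause/teardown
 * options, the CPPI5 fetch size in 32-bit words, and the completion
 * queue the channel returns descriptors to.
 */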
static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

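/*
 * Illustrative TX channel setup from a client driver's probe; the cfg
 * fields are the ones consumed below, but the "tx0" dma-name and the
 * ring sizes are hypothetical:
 *
 *	struct k3_udma_glue_tx_channel_cfg cfg = { };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *
 *	cfg.swdata_size = 16;
 *	cfg.tx_cfg.size = 128;
 *	cfg.txcq_cfg.size = 128;
 *	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 */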
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tchan_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(dev, "PSI-L request err %d\n", ret);
		goto err;
	}

	tx_chn->psil_paired = true;

	/* reset TX RT registers */
	k3_udma_glue_disable_tx_chn(tx_chn);

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

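/*
 * Illustrative submit path from a client driver, assuming a CPPI5 host
 * descriptor pool already mapped for DMA (desc, desc_dma and the buffer
 * variables are hypothetical; the cppi5 helpers come from
 * linux/dma/ti-cppi5.h):
 *
 *	cppi5_hdesc_init(desc, 0, psdata_size);
 *	cppi5_hdesc_set_pktlen(desc, pkt_len);
 *	cppi5_hdesc_attach_buf(desc, buf_dma, buf_len, buf_dma, buf_len);
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *
 * The completed descriptor is reclaimed from the TXCQ ring with
 * k3_udma_glue_pop_tx_chn(), which also releases the free_pkts slot
 * taken by the push.
 */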
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

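/*
 * Illustrative stop sequence from a client driver (the cleanup callback
 * name is hypothetical): tear the channel down, reclaim every descriptor
 * still sitting on the TX ring so its buffers can be unmapped and freed,
 * then disable the channel:
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 */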
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/* reset TXCQ as it is not input for udma - expected to be empty */
	if (tx_chn->ringtxcq)
		k3_ringacc_ring_reset(tx_chn->ringtxcq);

	/*
	 * TXQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call the .cleanup() callback for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

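/*
 * The TXCQ ring interrupt is an ordinary Linux interrupt; a hedged
 * hookup sketch (the handler and priv names are hypothetical):
 *
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq <= 0)
 *		return irq ? irq : -ENXIO;
 *	ret = devm_request_irq(dev, irq, my_tx_irq_handler,
 *			       IRQF_TRIGGER_HIGH, dev_name(dev), priv);
 */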
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

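/*
 * Push the static RX channel configuration to system firmware over
 * TI-SCI: packet (PBRR) mode, the CPPI5 fetch size in 32-bit words, the
 * flow id range owned by the channel, and an intentionally invalid
 * completion queue (see the TODO below).
 */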
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current
	 * sysfw and udmax impl, so just configure it to an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

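/*
 * Bring up one RX flow: claim the rflow from the UDMA device, request
 * and configure its RX/free-descriptor ring pair, then program the flow
 * routing through TI-SCI (descriptor layout, destination queue, tag
 * selects and the four FDQ slots, which all point at the single rxfdq
 * ring here). For remote channels the rings are owned by the peer, so
 * TI_SCI_RESOURCE_NULL is passed instead of real ring ids.
 */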
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    flow_cfg->ring_rxfdq0_id,
					    flow_cfg->ring_rxq_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

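/*
 * RX flows are resolved below from three possible sources: the channel's
 * own default flow when flow_id_use_rxchan_id is set, an already
 * reserved non-GP range when flow_id_base names one, or a range taken
 * from the general-purpose rflow pool (flow_id_base may be -1 to let the
 * allocator pick the base).
 */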
static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

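/*
 * Illustrative local RX channel setup with a single default flow; the
 * cfg fields are the ones consumed below, but the values and the "rx0"
 * dma-name are hypothetical:
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { };
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { };
 *
 *	cfg.swdata_size = 16;
 *	cfg.flow_id_num = 1;
 *	cfg.flow_id_base = -1;		(let the GP allocator pick the base)
 *	cfg.def_flow_cfg = &flow_cfg;
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx0", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */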
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse the udmap channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) &rx_chn->common, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) rx_chn->common.psdata_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) rx_chn->common.swdata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* request and cfg UDMAP RX channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (IS_ERR(rx_chn->udma_rchanx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) ret = PTR_ERR(rx_chn->udma_rchanx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) dev_err(dev, "UDMAX rchanx get err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) rx_chn->flow_num = cfg->flow_id_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) rx_chn->flow_id_base = cfg->flow_id_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Use RX channel id as flow id: target dev can't generate flow_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (cfg->flow_id_use_rxchan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) rx_chn->flow_id_base = rx_chn->udma_rchan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) sizeof(*rx_chn->flows), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!rx_chn->flows) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) for (i = 0; i < rx_chn->flow_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /* request and cfg PSI-L */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) rx_chn->common.dst_thread =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) xudma_dev_get_psil_base(rx_chn->common.udmax) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) rx_chn->udma_rchan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = k3_udma_glue_cfg_rx_chn(rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) dev_err(dev, "Failed to cfg rchan %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /* init the default RX flow only if flow_num == 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (cfg->def_flow_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ret = xudma_navss_psil_pair(rx_chn->common.udmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) rx_chn->common.src_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) rx_chn->common.dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) dev_err(dev, "PSI-L request err %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) rx_chn->psil_paired = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* reset RX RT registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) k3_udma_glue_disable_rx_chn(rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) k3_udma_glue_dump_rx_chn(rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return rx_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) k3_udma_glue_release_rx_chn(rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static struct k3_udma_glue_rx_channel *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct k3_udma_glue_rx_channel_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct k3_udma_glue_rx_channel *rx_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (cfg->flow_id_num <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cfg->flow_id_use_rxchan_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cfg->def_flow_cfg ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) cfg->flow_id_base < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * A remote RX channel is under control of a remote CPU core, so Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * can only request it and manipulate it through its dedicated RX flows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (!rx_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) rx_chn->common.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) rx_chn->common.swdata_size = cfg->swdata_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) rx_chn->remote = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rx_chn->udma_rchan_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) rx_chn->flow_num = cfg->flow_id_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) rx_chn->flow_id_base = cfg->flow_id_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) rx_chn->psil_paired = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* parse the udmap channel from DT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) &rx_chn->common, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) rx_chn->common.psdata_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) rx_chn->common.swdata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) sizeof(*rx_chn->flows), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!rx_chn->flows) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) for (i = 0; i < rx_chn->flow_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) k3_udma_glue_dump_rx_chn(rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return rx_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) k3_udma_glue_release_rx_chn(rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct k3_udma_glue_rx_channel *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct k3_udma_glue_rx_channel_cfg *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (cfg->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
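
/*
 * Usage sketch, not part of this file: a consumer driver fills a
 * struct k3_udma_glue_rx_channel_cfg and requests the channel by its DT
 * channel name. The "rx" name and sizes below are hypothetical,
 * illustrative values; a flow_id_base of -1 asks the
 * k3_udma_glue_allocate_rx_flows() path to allocate from the GP flow range.
 *
 *	struct k3_udma_glue_rx_channel_cfg cfg = { 0 };
 *	struct k3_udma_glue_rx_channel *rx_chn;
 *
 *	cfg.swdata_size = sizeof(void *);
 *	cfg.flow_id_base = -1;
 *	cfg.flow_id_num = 1;
 *	cfg.remote = false;
 *
 *	rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 */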
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (IS_ERR_OR_NULL(rx_chn->common.udmax))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (rx_chn->psil_paired) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) xudma_navss_psil_unpair(rx_chn->common.udmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rx_chn->common.src_thread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) rx_chn->common.dst_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) rx_chn->psil_paired = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) for (i = 0; i < rx_chn->flow_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) k3_udma_glue_release_rx_flow(rx_chn, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) xudma_free_gp_rflow_range(rx_chn->common.udmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) rx_chn->flow_id_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) rx_chn->flow_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) xudma_rchan_put(rx_chn->common.udmax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) rx_chn->udma_rchanx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u32 flow_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct k3_udma_glue_rx_flow_cfg *flow_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (flow_idx >= rx_chn->flow_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
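
/*
 * A minimal per-flow setup sketch (hypothetical consumer code; ring sizes
 * and modes are illustrative). Each flow needs its RX ring and its free
 * descriptor queue (FDQ) ring configured before the channel is enabled:
 *
 *	struct k3_udma_glue_rx_flow_cfg flow_cfg = { 0 };
 *
 *	flow_cfg.rx_cfg.size = 1024;
 *	flow_cfg.rx_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
 *	flow_cfg.rx_cfg.mode = K3_RINGACC_RING_MODE_RING;
 *	flow_cfg.rxfdq_cfg = flow_cfg.rx_cfg;
 *	flow_cfg.ring_rxq_id = K3_RINGACC_RING_ID_ANY;
 *	flow_cfg.ring_rxfdq0_id = K3_RINGACC_RING_ID_ANY;
 *	flow_cfg.rx_error_handling = false;
 *	flow_cfg.src_tag_lo_sel = K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG;
 *
 *	ret = k3_udma_glue_rx_flow_init(rx_chn, flow_idx, &flow_cfg);
 */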
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) u32 flow_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct k3_udma_glue_rx_flow *flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (flow_idx >= rx_chn->flow_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) flow = &rx_chn->flows[flow_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return k3_ringacc_get_ring_id(flow->ringrxfdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);
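
/*
 * Illustrative use (hypothetical consumer code): when several flows share
 * one free descriptor queue, the FDQ ring id of the first configured flow
 * can be fed back as ring_rxfdq0_id of the remaining flows, together with
 * K3_RINGACC_RING_SHARED in the FDQ ring cfg flags:
 *
 *	fdq_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn, 0);
 *	flow_cfg.rxfdq_cfg.flags = K3_RINGACC_RING_SHARED;
 *	flow_cfg.ring_rxfdq0_id = fdq_id;
 */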
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return rx_chn->flow_id_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) u32 flow_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct device *dev = rx_chn->common.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct ti_sci_msg_rm_udmap_flow_cfg req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) int rx_ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) int rx_ringfdq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (!rx_chn->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) memset(&req, 0, sizeof(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) req.valid_params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) req.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) req.flow_index = flow->udma_rflow_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) req.rx_dest_qnum = rx_ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) req.rx_fdq1_qnum = rx_ringfdq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) req.rx_fdq2_qnum = rx_ringfdq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) req.rx_fdq3_qnum = rx_ringfdq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) u32 flow_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct device *dev = rx_chn->common.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct ti_sci_msg_rm_udmap_flow_cfg req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!rx_chn->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) memset(&req, 0, sizeof(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) req.valid_params =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) req.nav_id = tisci_rm->tisci_dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) req.flow_index = flow->udma_rflow_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
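
/*
 * For a remote channel the flow enable/disable pair above is the only
 * runtime control Linux has, since the channel itself is owned by the
 * remote core. A minimal sketch (hypothetical consumer code):
 *
 *	ret = k3_udma_glue_rx_flow_enable(rx_chn, flow_idx);
 *	if (ret)
 *		return ret;
 *	(... receive traffic ...)
 *	k3_udma_glue_rx_flow_disable(rx_chn, flow_idx);
 */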
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (rx_chn->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (rx_chn->flows_ready < rx_chn->flow_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) UDMA_CHAN_RT_CTL_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) UDMA_PEER_RT_EN_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);
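
/*
 * The flows_ready check above implies an ordering: every flow must have
 * been configured (via k3_udma_glue_rx_flow_init() or the default flow
 * cfg) before the channel can be enabled. A minimal sketch (hypothetical
 * consumer code):
 *
 *	for (i = 0; i < flow_num; i++) {
 *		ret = k3_udma_glue_rx_flow_init(rx_chn, i, &flow_cfg);
 *		if (ret)
 *			goto err;
 *	}
 *
 *	ret = k3_udma_glue_enable_rx_chn(rx_chn);
 */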
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) xudma_rchanrt_write(rx_chn->udma_rchanx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (rx_chn->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) val = xudma_rchanrt_read(rx_chn->udma_rchanx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) UDMA_CHAN_RT_CTL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_err(rx_chn->common.dev, "RX tdown timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) val = xudma_rchanrt_read(rx_chn->udma_rchanx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) UDMA_CHAN_RT_PEER_RT_EN_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
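
/*
 * A typical shutdown ordering (sketch, hypothetical consumer code): tear
 * the channel down synchronously, drain each flow with the driver's own
 * descriptor cleanup callback, then disable the channel:
 *
 *	k3_udma_glue_tdown_rx_chn(rx_chn, true);
 *
 *	for (i = 0; i < flow_num; i++)
 *		k3_udma_glue_reset_rx_chn(rx_chn, i, drv, my_rx_cleanup,
 *					  false);
 *
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 *
 * my_rx_cleanup() here stands in for the driver's own helper.
 */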
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) u32 flow_num, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct device *dev = rx_chn->common.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dma_addr_t desc_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) int occ_rx, i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* reset RXCQ as it is not an input to UDMA - expected to be empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (flow->ringrx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) k3_ringacc_ring_reset(flow->ringrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Skip the RX FDQ reset if one FDQ is shared by the set of flows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (skip_fdq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * The RX FDQ has to be reset in a special way, as it is an input to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * UDMA and its state is cached by UDMA, so:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * 1) save the RX FDQ occupancy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * 2) clean up the RX FDQ and call the .cleanup() callback for each desc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * 3) reset the RX FDQ in a special way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) for (i = 0; i < occ_rx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) dev_err(dev, "RX reset pop %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) cleanup(data, desc_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
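
/*
 * The cleanup callback gets the DMA address popped from the FDQ; a driver
 * typically maps it back to its CPU-side descriptor, unmaps the attached
 * buffer and frees both. A sketch with hypothetical helpers
 * (my_desc_dma2virt() and struct my_rx_desc are not part of this API):
 *
 *	static void my_rx_cleanup(void *data, dma_addr_t desc_dma)
 *	{
 *		struct my_drv *drv = data;
 *		struct my_rx_desc *d = my_desc_dma2virt(drv, desc_dma);
 *
 *		dma_unmap_single(drv->dev, d->buf_dma, d->buf_len,
 *				 DMA_FROM_DEVICE);
 *		my_free_desc(drv, d);
 *	}
 */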
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) u32 flow_num, struct cppi5_host_desc_t *desc_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) dma_addr_t desc_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);
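
/*
 * Refill sketch (hypothetical consumer code): initialize a CPPI5 host
 * descriptor, attach the mapped buffer, stash a lookup token in swdata and
 * push the descriptor to the flow's FDQ. The EPIB/psdata sizes must match
 * what the channel was requested with; my_buf_token is hypothetical:
 *
 *	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
 *			 psdata_size);
 *	cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
 *	swdata = cppi5_hdesc_get_swdata(desc_rx);
 *	*(void **)swdata = my_buf_token;
 *
 *	ret = k3_udma_glue_push_rx_chn(rx_chn, flow_idx, desc_rx, desc_dma);
 */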
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) u32 flow_num, dma_addr_t *desc_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);
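
/*
 * Completion sketch (hypothetical consumer code): pop descriptors until the
 * RX ring is empty; the underlying k3_ringacc_ring_pop() returns -ENODATA
 * once nothing is left, which serves as the loop exit:
 *
 *	while (!k3_udma_glue_pop_rx_chn(rx_chn, flow_idx, &desc_dma)) {
 *		desc_rx = my_desc_dma2virt(drv, desc_dma);
 *		(... process the completed packet ...)
 *	}
 */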
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) u32 flow_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) struct k3_udma_glue_rx_flow *flow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) flow = &rx_chn->flows[flow_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return flow->virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
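
/*
 * A minimal sketch of hooking the per-flow completion interrupt
 * (hypothetical consumer code; my_rx_irq_handler is the driver's own ISR):
 *
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, flow_idx);
 *	if (irq <= 0)
 *		return irq ? irq : -ENXIO;
 *
 *	ret = devm_request_irq(dev, irq, my_rx_irq_handler,
 *			       IRQF_TRIGGER_HIGH, dev_name(dev), drv);
 */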