Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) // Copyright 2019 NXP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/iommu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/sys_soc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/fsl/mc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <soc/fsl/dpaa2-io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include "../virt-dma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include "dpdmai.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include "dpaa2-qdma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
/*
 * When true, every frame descriptor and frame-list entry is built with the
 * BMT (bypass memory translation) flag set, so the QDMA engine bypasses the
 * SMMU.  NOTE(review): nothing visible in this file ever clears this flag
 * (no module parameter, no probe-time detection) — confirm whether IOVA
 * mode is supported at all on this branch.
 */
static bool smmu_disable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
/* Map a generic dma_chan back to its enclosing dpaa2_qdma_chan. */
static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/* Map a virt-dma descriptor back to its enclosing dpaa2_qdma_comp. */
static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 					      sizeof(struct dpaa2_fd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 					      sizeof(struct dpaa2_fd), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	if (!dpaa2_chan->fd_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 					      sizeof(struct dpaa2_fl_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 					      sizeof(struct dpaa2_fl_entry), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	if (!dpaa2_chan->fl_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		goto err_fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	dpaa2_chan->sdd_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 		dma_pool_create("sdd_pool", dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 				sizeof(struct dpaa2_qdma_sd_d),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 				sizeof(struct dpaa2_qdma_sd_d), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	if (!dpaa2_chan->sdd_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 		goto err_fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	return dpaa2_qdma->desc_allocated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) err_fl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	dma_pool_destroy(dpaa2_chan->fl_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) err_fd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	dma_pool_destroy(dpaa2_chan->fd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 
/*
 * Tear down a channel: complete and free every descriptor still held by
 * the virtual channel, release all comps (both in-flight and recycled),
 * then destroy the three per-channel DMA pools and drop the engine-wide
 * allocation count.
 */
static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
	struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
	unsigned long flags;

	LIST_HEAD(head);

	/* Detach all pending descriptors under the vchan lock ... */
	spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
	spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

	/* ... and free them outside of it. */
	vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

	/* Return the comps' pool memory before destroying the pools. */
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
	dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

	dma_pool_destroy(dpaa2_chan->fd_pool);
	dma_pool_destroy(dpaa2_chan->fl_pool);
	dma_pool_destroy(dpaa2_chan->sdd_pool);
	dpaa2_qdma->desc_allocated--;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 
/*
 * Request a command descriptor for enqueue.
 *
 * Fast path: pop a recycled comp off comp_free under queue_lock.
 * Slow path: the free list is empty, so drop the lock and allocate a new
 * comp plus one block from each of the fd/fl/sdd pools.  GFP_NOWAIT is
 * used throughout because this can be called from the prep path.
 * Returns NULL (after logging) on any allocation failure.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
	struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
	struct device *dev = &qdma_priv->dpdmai_dev->dev;
	struct dpaa2_qdma_comp *comp_temp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
	if (list_empty(&dpaa2_chan->comp_free)) {
		/* Nothing to recycle; allocate fresh, outside the lock. */
		spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
		if (!comp_temp)
			goto err;
		/* Backing memory for the frame descriptor. */
		comp_temp->fd_virt_addr =
			dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
				       &comp_temp->fd_bus_addr);
		if (!comp_temp->fd_virt_addr)
			goto err_comp;

		/* Backing memory for the frame-list entries. */
		comp_temp->fl_virt_addr =
			dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
				       &comp_temp->fl_bus_addr);
		if (!comp_temp->fl_virt_addr)
			goto err_fd_virt;

		/* Backing memory for the source/destination descriptors. */
		comp_temp->desc_virt_addr =
			dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_fl_virt;

		comp_temp->qchan = dpaa2_chan;
		return comp_temp;
	}

	/* Reuse the oldest recycled comp from the free list. */
	comp_temp = list_first_entry(&dpaa2_chan->comp_free,
				     struct dpaa2_qdma_comp, list);
	list_del(&comp_temp->list);
	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

	comp_temp->qchan = dpaa2_chan;

	return comp_temp;

	/* Unwind the partial allocation in reverse order. */
err_fl_virt:
		dma_pool_free(dpaa2_chan->fl_pool,
			      comp_temp->fl_virt_addr,
			      comp_temp->fl_bus_addr);
err_fd_virt:
		dma_pool_free(dpaa2_chan->fd_pool,
			      comp_temp->fd_virt_addr,
			      comp_temp->fd_bus_addr);
err_comp:
	kfree(comp_temp);
err:
	dev_err(dev, "Failed to request descriptor\n");
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	struct dpaa2_fd *fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	fd = dpaa2_comp->fd_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	memset(fd, 0, sizeof(struct dpaa2_fd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	/* fd populated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	 * Bypass memory translation, Frame list format, short length disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	 * we need to disable BMT if fsl-mc use iova addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	if (smmu_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 		dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) /* first frame list for descriptor buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 				 struct dpaa2_qdma_comp *dpaa2_comp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 				 bool wrt_changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	struct dpaa2_qdma_sd_d *sdd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	sdd = dpaa2_comp->desc_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	memset(sdd, 0, 2 * (sizeof(*sdd)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	/* source descriptor CMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 	sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	sdd++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	/* dest descriptor CMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	if (wrt_changed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	/* first frame list to source descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	dpaa2_fl_set_len(f_list, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	/* bypass memory translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	if (smmu_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /* source and destination frame list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 			   dma_addr_t dst, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 			   size_t len, uint8_t fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	/* source frame list to source buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	dpaa2_fl_set_addr(f_list, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	dpaa2_fl_set_len(f_list, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	/* single buffer frame or scatter gather frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 	/* bypass memory translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	if (smmu_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	f_list++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	/* destination frame list to destination buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	dpaa2_fl_set_addr(f_list, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	dpaa2_fl_set_len(f_list, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 	dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	/* single buffer frame or scatter gather frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	dpaa2_fl_set_final(f_list, QDMA_FL_F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 	/* bypass memory translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	if (smmu_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) static struct dma_async_tx_descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) *dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 			dma_addr_t src, size_t len, ulong flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	struct dpaa2_qdma_engine *dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	struct dpaa2_qdma_comp *dpaa2_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 	struct dpaa2_fl_entry *f_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	bool wrt_changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	dpaa2_qdma = dpaa2_chan->qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	if (!dpaa2_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	/* populate Frame descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 	dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 	f_list = dpaa2_comp->fl_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	/* first frame list for descriptor buffer (logn format) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 	dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 	f_list++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 	return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 	struct dpaa2_qdma_comp *dpaa2_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 	struct virt_dma_desc *vdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	struct dpaa2_fd *fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 	spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 	spin_lock(&dpaa2_chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	if (vchan_issue_pending(&dpaa2_chan->vchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 		vdesc = vchan_next_desc(&dpaa2_chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 		if (!vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 			goto err_enqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		dpaa2_comp = to_fsl_qdma_comp(vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 		fd = dpaa2_comp->fd_virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 		list_del(&vdesc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 		list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 		err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 			list_del(&dpaa2_comp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 			list_add_tail(&dpaa2_comp->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 				      &dpaa2_chan->comp_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) err_enqueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	spin_unlock(&dpaa2_chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
/*
 * Probe-time MC-bus setup: open the DPDMAI object, validate its version,
 * and cache the per-priority rx/tx frame-queue ids in priv.  On any
 * failure after dpdmai_open() the handle is closed again before
 * returning the error.
 * NOTE(review): ppriv allocated below is not freed on the error paths
 * here — presumably the probe error path releases it; verify.
 */
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
	struct dpaa2_qdma_priv_per_prio *ppriv;
	struct device *dev = &ls_dev->dev;
	struct dpaa2_qdma_priv *priv;
	u8 prio_def = DPDMAI_PRIO_NUM;
	int err = -EINVAL;
	int i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpqdma_id = ls_dev->obj_desc.id;

	/* Get the handle for the DPDMAI this interface is associate with */
	err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpdmai_open() failed\n");
		return err;
	}

	dev_dbg(dev, "Opened dpdmai object successfully\n");

	err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpdmai_attr);
	if (err) {
		dev_err(dev, "dpdmai_get_attributes() failed\n");
		goto exit;
	}

	/* Refuse firmware objects newer than this driver understands. */
	if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI major version mismatch\n"
			     "Found %u.%u, supported version is %u.%u\n",
				priv->dpdmai_attr.version.major,
				priv->dpdmai_attr.version.minor,
				DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
		err = -EINVAL;
		dev_err(dev, "DPDMAI minor version mismatch\n"
			     "Found %u.%u, supported version is %u.%u\n",
				priv->dpdmai_attr.version.major,
				priv->dpdmai_attr.version.minor,
				DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
		goto exit;
	}

	/* One priv_per_prio per priority pair, capped at DPDMAI_PRIO_NUM. */
	priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
	ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		err = -ENOMEM;
		goto exit;
	}
	priv->ppriv = ppriv;

	/* Cache rx (response) and tx (request) FQ ids for each priority. */
	for (i = 0; i < priv->num_pairs; i++) {
		err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_rx_queue() failed\n");
			goto exit;
		}
		ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

		err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
					  i, &priv->tx_fqid[i]);
		if (err) {
			dev_err(dev, "dpdmai_get_tx_queue() failed\n");
			goto exit;
		}
		ppriv->req_fqid = priv->tx_fqid[i];
		ppriv->prio = i;
		ppriv->priv = priv;
		ppriv++;
	}

	return 0;
exit:
	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
/*
 * FQDAN (frame-queue data availability notification) callback.
 *
 * Pulls completed frames from the response FQ into the dpaa2_io store,
 * matches each dequeued FD back to its in-flight comp by comparing the
 * FD address against every entry on every channel's comp_used list,
 * completes the matching cookie, and finally re-arms the notification.
 */
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
			struct dpaa2_qdma_priv_per_prio, nctx);
	struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
	struct dpaa2_qdma_priv *priv = ppriv->priv;
	u32 n_chans = priv->dpaa2_qdma->n_chans;
	struct dpaa2_qdma_chan *qchan;
	const struct dpaa2_fd *fd_eq;
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last = 0;
	int found;
	u8 status;
	int err;
	int i;

	/*
	 * Retry the volatile dequeue until it is accepted.
	 * NOTE(review): this spins forever if the pull keeps failing —
	 * confirm dpaa2_io_service_pull_fq() only fails transiently.
	 */
	do {
		err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err);

	/* Drain every dequeue entry the store produced. */
	while (!is_last) {
		do {
			dq = dpaa2_io_store_next(ppriv->store, &is_last);
		} while (!is_last && !dq);
		if (!dq) {
			dev_err(priv->dev, "FQID returned no valid frames!\n");
			continue;
		}

		/* obtain FD and process the error */
		fd = dpaa2_dq_fd(dq);

		/* Low byte of CTRL carries the frame's error status. */
		status = dpaa2_fd_get_ctrl(fd) & 0xff;
		if (status)
			dev_err(priv->dev, "FD error occurred\n");
		found = 0;
		for (i = 0; i < n_chans; i++) {
			qchan = &priv->dpaa2_qdma->chans[i];
			spin_lock(&qchan->queue_lock);
			if (list_empty(&qchan->comp_used)) {
				spin_unlock(&qchan->queue_lock);
				continue;
			}
			/*
			 * Match the dequeued frame to its comp by the FD
			 * address that was enqueued for it.
			 */
			list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
						 &qchan->comp_used, list) {
				fd_eq = dpaa2_comp->fd_virt_addr;

				if (le64_to_cpu(fd_eq->simple.addr) ==
				    le64_to_cpu(fd->simple.addr)) {
					/* vchan.lock nests inside queue_lock. */
					spin_lock(&qchan->vchan.lock);
					vchan_cookie_complete(&
							dpaa2_comp->vdesc);
					spin_unlock(&qchan->vchan.lock);
					found = 1;
					break;
				}
			}
			spin_unlock(&qchan->queue_lock);
			if (found)
				break;
		}
	}

	/* Re-arm so the next data availability notification fires. */
	dpaa2_io_service_rearm(NULL, ctx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	struct dpaa2_qdma_priv_per_prio *ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	int i, num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	num = priv->num_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	ppriv = priv->ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		ppriv->nctx.is_cdan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		ppriv->nctx.id = ppriv->rsp_fqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 			dev_err(dev, "Notification register failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 			goto err_service;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 		ppriv->store =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 			dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		if (!ppriv->store) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 			dev_err(dev, "dpaa2_io_store_create() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 			goto err_store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		ppriv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) err_store:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) err_service:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	ppriv--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	while (ppriv >= priv->ppriv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		dpaa2_io_store_destroy(ppriv->store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		ppriv--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	for (i = 0; i < priv->num_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		dpaa2_io_store_destroy(ppriv->store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		ppriv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	for (i = 0; i < priv->num_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 		ppriv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	struct dpdmai_rx_queue_cfg rx_queue_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	struct dpaa2_qdma_priv_per_prio *ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	struct fsl_mc_device *ls_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 	int i, num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	ls_dev = to_fsl_mc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	num = priv->num_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	ppriv = priv->ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 		rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 					DPDMAI_QUEUE_OPT_DEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 		rx_queue_cfg.dest_cfg.priority = ppriv->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 		err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 					  rx_queue_cfg.dest_cfg.priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 					  &rx_queue_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 			dev_err(dev, "dpdmai_set_rx_queue() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		ppriv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	struct device *dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	struct fsl_mc_device *ls_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	ls_dev = to_fsl_mc_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	for (i = 0; i < priv->num_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		ppriv->nctx.qman64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 		ppriv->nctx.dpio_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		ppriv++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		dev_err(dev, "dpdmai_reset() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 				   struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	list_for_each_entry_safe(comp_tmp, _comp_tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 				 head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		spin_lock_irqsave(&qchan->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		list_del(&comp_tmp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 		spin_unlock_irqrestore(&qchan->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 		dma_pool_free(qchan->fd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 			      comp_tmp->fd_virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 			      comp_tmp->fd_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		dma_pool_free(qchan->fl_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 			      comp_tmp->fl_virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 			      comp_tmp->fl_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 		dma_pool_free(qchan->sdd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 			      comp_tmp->desc_virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 			      comp_tmp->desc_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		kfree(comp_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	struct dpaa2_qdma_chan *qchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	int num, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	num = dpaa2_qdma->n_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		qchan = &dpaa2_qdma->chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 		dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		dma_pool_destroy(qchan->fd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		dma_pool_destroy(qchan->fl_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 		dma_pool_destroy(qchan->sdd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	struct dpaa2_qdma_comp *dpaa2_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 	struct dpaa2_qdma_chan *qchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	dpaa2_comp = to_fsl_qdma_comp(vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	qchan = dpaa2_comp->qchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	spin_lock_irqsave(&qchan->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	list_del(&dpaa2_comp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	spin_unlock_irqrestore(&qchan->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	struct dpaa2_qdma_chan *dpaa2_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	int num = priv->num_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	for (i = 0; i < dpaa2_qdma->n_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 		dpaa2_chan = &dpaa2_qdma->chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		dpaa2_chan->qdma = dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		dpaa2_chan->fqid = priv->tx_fqid[i % num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 		vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 		spin_lock_init(&dpaa2_chan->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		INIT_LIST_HEAD(&dpaa2_chan->comp_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 		INIT_LIST_HEAD(&dpaa2_chan->comp_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	struct device *dev = &dpdmai_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	struct dpaa2_qdma_engine *dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	struct dpaa2_qdma_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	dev_set_drvdata(dev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	priv->dpdmai_dev = dpdmai_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	if (priv->iommu_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 		smmu_disable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	/* obtain a MC portal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		if (err == -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 			err = -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 			dev_err(dev, "MC portal allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 		goto err_mcportal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	/* DPDMAI initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	err = dpaa2_qdma_setup(dpdmai_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 		dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 		goto err_dpdmai_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	/* DPIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 	err = dpaa2_qdma_dpio_setup(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 		dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		goto err_dpio_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	/* DPDMAI binding to DPIO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	err = dpaa2_dpdmai_bind(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 		dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		goto err_bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	/* DPDMAI enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 		dev_err(dev, "dpdmai_enable() faile\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 		goto err_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	if (!dpaa2_qdma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		goto err_eng;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	priv->dpaa2_qdma = dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	dpaa2_qdma->priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	dpaa2_qdma->desc_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	dpaa2_qdma->n_chans = NUM_CH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	dpaa2_dpdmai_init_channels(dpaa2_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	if (soc_device_match(soc_fixup_tuning))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 		dpaa2_qdma->qdma_wrtype_fixup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 		dpaa2_qdma->qdma_wrtype_fixup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	dpaa2_qdma->dma_dev.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	dpaa2_qdma->dma_dev.device_alloc_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 		dpaa2_qdma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	dpaa2_qdma->dma_dev.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		dpaa2_qdma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	err = dma_async_device_register(&dpaa2_qdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		dev_err(dev, "Can't register NXP QDMA engine.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 		goto err_dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) err_dpaa2_qdma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	kfree(dpaa2_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) err_eng:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) err_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	dpaa2_dpdmai_dpio_unbind(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) err_bind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	dpaa2_dpmai_store_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	dpaa2_dpdmai_dpio_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) err_dpio_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	kfree(priv->ppriv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) err_dpdmai_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	fsl_mc_portal_free(priv->mc_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) err_mcportal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	dev_set_drvdata(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	struct dpaa2_qdma_engine *dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	struct dpaa2_qdma_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	dev = &ls_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	dpaa2_qdma = priv->dpaa2_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	dpaa2_dpdmai_dpio_unbind(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	dpaa2_dpmai_store_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	dpaa2_dpdmai_dpio_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 	fsl_mc_portal_free(priv->mc_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	dev_set_drvdata(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	dpaa2_dpdmai_free_channels(dpaa2_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 	dma_async_device_unregister(&dpaa2_qdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	kfree(dpaa2_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	struct dpaa2_qdma_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	dev = &ls_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	dpaa2_dpdmai_dpio_unbind(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 		.vendor = FSL_MC_VENDOR_FREESCALE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 		.obj_type = "dpdmai",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	{ .vendor = 0x0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) static struct fsl_mc_driver dpaa2_qdma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 		.name	= "dpaa2-qdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 		.owner  = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	.probe          = dpaa2_qdma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	.remove		= dpaa2_qdma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	.shutdown	= dpaa2_qdma_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 	.match_id_table	= dpaa2_qdma_id_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) static int __init dpaa2_qdma_driver_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	return fsl_mc_driver_register(&(dpaa2_qdma_driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) late_initcall(dpaa2_qdma_driver_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static void __exit fsl_qdma_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) module_exit(fsl_qdma_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) MODULE_ALIAS("platform:fsl-dpaa2-qdma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");