Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

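/*
 * Translate the hardware completion status into a dmaengine result and
 * invoke the client callback for the completed descriptor.
 */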
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS)
		res.result = DMA_TRANS_NOERROR;
	else if (desc->completion->status)
		res.result = DMA_TRANS_WRITE_FAILED;
	else if (comp_type == IDXD_COMPLETE_ABORT)
		res.result = DMA_TRANS_ABORTED;
	else
		complete = 0;

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}
}

static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

static inline void set_completion_address(struct idxd_desc *desc,
					  u64 *compl_addr)
{
	*compl_addr = desc->compl_dma;
}

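/*
 * Fill in the DSA hardware descriptor fields common to all operation
 * types, including the completion record address and the MSIX completion
 * vector chosen by round robin below.
 */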
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					 struct dsa_hw_desc *hw, char opcode,
					 u64 addr_f1, u64 addr_f2, u64 len,
					 u64 compl, u32 flags)
{
	struct idxd_device *idxd = wq->idxd;

	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
	hw->completion_addr = compl;

	/*
	 * Descriptor completion vectors are 1-8 for MSIX. We will round
	 * robin through the 8 vectors.
	 */
	wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
	hw->int_handle = wq->vec_ptr;
}

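/*
 * Prepare a DSA memory move descriptor for a dmaengine memcpy request.
 * Returns NULL if the work queue is not enabled or the requested length
 * exceeds the device's maximum transfer size.
 */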
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

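/*
 * Completions are delivered through the descriptor callbacks in whatever
 * order the hardware finishes them, so per-cookie status is not tracked.
 */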
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

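/*
 * Assign a cookie and submit the descriptor to the work queue right away;
 * there is no software queue between submission and the hardware.
 */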
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

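/*
 * Register a dmaengine device for this idxd device. Capabilities are
 * advertised based on the operations reported in the OPCAP register.
 */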
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

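/*
 * Expose a work queue as a DMA channel. Each descriptor in the work queue
 * is initialized with the dmaengine transaction callbacks before the
 * channel is registered.
 */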
int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(&wq->conf_dev);

	return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(&wq->conf_dev);
}