Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

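The listing below is the Intel IOP ADMA offload-engine driver (drivers/dma/iop-adma.c in the kernel source) as carried in this tree.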
// SPDX-License-Identifier: GPL-2.0-only
/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>
#include <linux/slab.h>

#include "iop-adma.h"
#include "dmaengine.h"

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)

/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}

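/*
 * iop_adma_run_tx_complete_actions - finish a completed descriptor: invoke
 * the client callback, unmap the transaction and run any dependent
 * operations.  Returns the most recently completed cookie.
 */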
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	BUG_ON(tx->cookie < 0);
	if (tx->cookie > 0) {
		cookie = tx->cookie;
		tx->cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(tx, NULL);

		dma_descriptor_unmap(tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(tx);

	return cookie;
}

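/*
 * iop_adma_clean_slot - return an acked descriptor to the free pool.
 * Returns 1 when the descriptor is the tail of the chain (it is kept so new
 * operations can be appended to it), 0 otherwise.
 */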
static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}

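/*
 * __iop_adma_slot_cleanup - walk the descriptor chain from the oldest entry,
 * collect zero-sum results, run completion actions and reclaim slots up to
 * (but not past) the descriptor currently loaded in the hardware channel.
 * Caller must hold iop_chan->lock.
 */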
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %pad next_desc: %#llx ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			&iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
					    grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
					iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	if (cookie > 0) {
		iop_chan->common.completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

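/*
 * iop_adma_slot_cleanup - run the cleanup pass with the channel lock held;
 * the tasklet variant below takes the lock with SINGLE_DEPTH_NESTING so
 * dependency submissions do not trip lockdep.
 */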
static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_adma_tasklet(struct tasklet_struct *t)
{
	struct iop_adma_chan *iop_chan = from_tasklet(iop_chan, t,
						      irq_tasklet);

	/* lockdep will flag dependency submissions as potentially
	 * recursive locking; this is not the case as a dependency
	 * submission will never recurse a channel's submit routine.
	 * There are checks in async_tx.c to prevent this.
	 */
	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}

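/*
 * iop_adma_alloc_slots - grab a contiguous, correctly aligned run of
 * descriptor slots for one operation.  The search starts at the last slot
 * handed out and, failing that, retries once from the head of the slot list
 * before falling back to direct reclaim.
 */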
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
			struct iop_adma_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#llx) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					(u64)iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}

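/*
 * iop_adma_check_threshold - flush pending descriptors to the hardware once
 * enough work has accumulated on the channel.
 */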
static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

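/*
 * iop_adma_tx_submit - assign a cookie, splice the software descriptor list
 * onto the channel's chain, link the new hardware descriptors after the old
 * chain tail and kick the engine if the pending threshold is crossed.
 */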
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;

	spin_lock_bh(&iop_chan->lock);
	cookie = dma_cookie_assign(tx);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	dma_addr_t dma_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		dev_get_platdata(&iop_chan->device->pdev->dev);
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->tx_list);
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = iop_chan->device->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * IOP_ADMA_SLOT_SIZE;
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
					struct iop_adma_desc_slot,
					slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}

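/*
 * iop_adma_prep_dma_interrupt - prepare a descriptor whose only visible
 * effect is to raise a completion interrupt on the channel.
 */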
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

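/*
 * iop_adma_prep_dma_memcpy - prepare a single-source copy descriptor of up
 * to IOP_ADMA_MAX_BYTE_COUNT bytes.
 */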
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

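/*
 * iop_adma_prep_dma_xor - prepare an XOR descriptor that combines src_cnt
 * sources of up to IOP_ADMA_XOR_MAX_BYTE_COUNT bytes into dma_dest.
 */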
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %zu flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

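/*
 * iop_adma_prep_dma_xor_val - prepare an XOR-validate (zero-sum) descriptor;
 * the engine checks whether the sources XOR to zero and the outcome is
 * written to *result during slot cleanup.
 */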
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
			  unsigned int src_cnt, size_t len, u32 *result,
			  unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__func__, grp_start->xor_check_result);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

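/*
 * iop_adma_prep_dma_pq - prepare a P+Q (RAID6 syndrome) generation
 * descriptor.  Extra implicit sources are added when a previous P/Q result
 * is being continued; see the dma_maxpq comment in include/linux/dmaengine.h.
 */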
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf, size_t len,
		     unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *g;
	int slot_cnt, slots_per_op;
	int continue_srcs;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %zu flags: %lx\n",
		__func__, src_cnt, len, flags);

	if (dmaf_p_disabled_continue(flags))
		continue_srcs = 1+src_cnt;
	else if (dmaf_continue(flags))
		continue_srcs = 3+src_cnt;
	else
		continue_srcs = 0+src_cnt;

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		int i;

		g = sw_desc->group_head;
		iop_desc_set_byte_count(g, iop_chan, len);

		/* even if P is disabled its destination address (bits
		 * [3:0]) must match Q.  It is ok if P points to an
		 * invalid address, it won't be written.
		 */
		if (flags & DMA_PREP_PQ_DISABLE_P)
			dst[0] = dst[1] & 0x7;

		iop_desc_set_pq_addr(g, dst);
		sw_desc->async_tx.flags = flags;
		for (i = 0; i < src_cnt; i++)
			iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);

		/* if we are continuing a previous operation factor in
		 * the old p and q values, see the comment for dma_maxpq
		 * in include/linux/dmaengine.h
		 */
		if (dmaf_p_disabled_continue(flags))
			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
		else if (dmaf_continue(flags)) {
			iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
			iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
			iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
		}
		iop_desc_init_pq(g, i, flags);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

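/*
 * iop_adma_prep_dma_pq_val - prepare a P+Q validate descriptor; the existing
 * P and Q blocks are tagged onto the end of the source list and the check
 * result is reported through *pqres.
 */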
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
			 unsigned int src_cnt, const unsigned char *scf,
			 size_t len, enum sum_check_flags *pqres,
			 unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *g;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		/* for validate operations p and q are tagged onto the
		 * end of the source list
		 */
		int pq_idx = src_cnt;

		g = sw_desc->group_head;
		iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
		iop_desc_set_pq_zero_sum_byte_count(g, len);
		g->pq_check_result = pqres;
		pr_debug("\t%s: g->pq_check_result: %p\n",
			__func__, g->pq_check_result);
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
							  src[src_cnt],
							  scf[src_cnt]);
		iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

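/*
 * iop_adma_free_chan_resources - drain the channel, free every descriptor
 * slot and warn if any descriptors were still in use.
 */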
static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  * iop_adma_status - poll the status of an ADMA transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748)  * @chan: ADMA channel handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  * @cookie: ADMA transaction identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  * @txstate: a holder for the current state of the channel or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) static enum dma_status iop_adma_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 					dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 					struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	iop_adma_slot_cleanup(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	return dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
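/* end-of-transfer interrupt: schedule descriptor cleanup, then ack the status bit */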
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) static irqreturn_t iop_adma_eot_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	struct iop_adma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	dev_dbg(chan->device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	tasklet_schedule(&chan->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	iop_adma_device_clear_eot_status(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
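/* end-of-chain interrupt: handled the same way as end-of-transfer */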
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	struct iop_adma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	dev_dbg(chan->device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	tasklet_schedule(&chan->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	iop_adma_device_clear_eoc_status(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
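/*
 * Error interrupt: decode the channel status bits into a readable message,
 * clear the error state, and BUG() since the engine can no longer be trusted.
 */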
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) static irqreturn_t iop_adma_err_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	struct iop_adma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	unsigned long status = iop_chan_get_status(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	dev_err(chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		"error ( %s%s%s%s%s%s%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		iop_is_err_split_tx(status, chan) ? "split_tx " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	iop_adma_device_clear_err_status(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
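/**
 * iop_adma_issue_pending - kick the hardware to start on queued descriptors
 * @chan: ADMA channel handle
 *
 * Only touches the hardware if new descriptors were submitted since the
 * last append.
 */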
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static void iop_adma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	if (iop_chan->pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		iop_chan->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		iop_chan_append(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * Perform a transaction to verify the HW works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) #define IOP_ADMA_TEST_SIZE 2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
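/*
 * memcpy self-test: fill a source buffer with a counting pattern, DMA it
 * to a zeroed destination through the first channel, and memcmp the result.
 */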
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	void *src, *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	dma_addr_t src_dma, dest_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct dma_chan *dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	struct iop_adma_chan *iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	dev_dbg(device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (!src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (!dest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		kfree(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	/* Fill in src buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		((u8 *) src)[i] = (u8)i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	/* Start copy, using first DMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	dma_chan = container_of(device->common.channels.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 				struct dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	dest_dma = dma_map_single(dma_chan->device->dev, dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	src_dma = dma_map_single(dma_chan->device->dev, src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 				      IOP_ADMA_TEST_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (iop_adma_status(dma_chan, cookie, NULL) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			"Self-test copy timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	iop_chan = to_iop_adma_chan(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			"Self-test copy failed compare, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) free_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	iop_adma_free_chan_resources(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	kfree(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	kfree(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
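/*
 * xor/xor-val self-test: XOR several distinctly patterned pages into a
 * destination and verify the result, then (when DMA_XOR_VAL is advertised)
 * exercise the zero-sum check over the sources plus the destination.
 */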
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) iop_adma_xor_val_self_test(struct iop_adma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	int i, src_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct page *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	dma_addr_t dest_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	struct dma_chan *dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	u8 cmp_byte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	u32 cmp_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	u32 zero_sum_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	struct iop_adma_chan *iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	dev_dbg(device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		if (!xor_srcs[src_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			while (src_idx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 				__free_page(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	dest = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (!dest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		while (src_idx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			__free_page(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/* Fill in src buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		u8 *ptr = page_address(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		for (i = 0; i < PAGE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			ptr[i] = (1 << src_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		cmp_byte ^= (u8) (1 << src_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			(cmp_byte << 8) | cmp_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	memset(page_address(dest), 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	dma_chan = container_of(device->common.channels.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				struct dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	/* test xor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 				PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 					   0, PAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	msleep(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	if (iop_adma_status(dma_chan, cookie, NULL) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			"Self-test xor timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	iop_chan = to_iop_adma_chan(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		u32 *ptr = page_address(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		if (ptr[i] != cmp_word) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 				"Self-test xor failed compare, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		PAGE_SIZE, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	/* skip zero sum if the capability is not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/* zero sum the sources with the destination page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		zero_sum_srcs[i] = xor_srcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	zero_sum_srcs[i] = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
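	/* seed non-zero so the check only passes if the engine reports a zero sum */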
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	zero_sum_result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 					   zero_sum_srcs[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 					   DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				       &zero_sum_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	msleep(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			"Self-test zero sum timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	if (zero_sum_result != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			"Self-test zero sum failed compare, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* test for non-zero parity sum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	zero_sum_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 					   zero_sum_srcs[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 					   DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				       &zero_sum_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	msleep(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			"Self-test non-zero sum timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (zero_sum_result != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		dev_err(dma_chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			"Self-test non-zero sum failed compare, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) free_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	iop_adma_free_chan_resources(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	src_idx = IOP_ADMA_NUM_SRC_TEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	while (src_idx--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		__free_page(xor_srcs[src_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	__free_page(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) #ifdef CONFIG_RAID6_PQ
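/*
 * pq self-test: generate P/Q with the engine and compare against the
 * software raid6 syndrome, then run pq-val against the good syndrome
 * (expecting a clean result) and against deliberately corrupted P/Q
 * (expecting both check bits to be set).
 */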
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	/* combined sources, software pq results, and extra hw pq results */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/* ptr to the extra hw pq buffers defined above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	/* address conversion buffers (dma_map / page_address) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	struct dma_chan *dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	u32 zero_sum_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	dev_dbg(device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	for (i = 0; i < ARRAY_SIZE(pq); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		pq[i] = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (!pq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				__free_page(pq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	/* Fill in src buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		pq_sw[i] = page_address(pq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	pq_sw[i] = page_address(pq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	pq_sw[i+1] = page_address(pq[i+1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	dma_chan = container_of(device->common.channels.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 				struct dma_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 				device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	dev = dma_chan->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	/* initialize the dests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	memset(page_address(pq_hw[0]), 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	memset(page_address(pq_hw[1]), 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	/* test pq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 					 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 				  IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 				  PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 				  DMA_PREP_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				  DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	msleep(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	if (iop_adma_status(dma_chan, cookie, NULL) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		dev_err(dev, "Self-test pq timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		   page_address(pq_hw[0]), PAGE_SIZE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		dev_err(dev, "Self-test p failed compare, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		   page_address(pq_hw[1]), PAGE_SIZE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		dev_err(dev, "Self-test q failed compare, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	/* test correct zero sum using the software generated pq values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 					 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	zero_sum_result = ~0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 				      pq_src, IOP_ADMA_NUM_SRC_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	msleep(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (iop_adma_status(dma_chan, cookie, NULL) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (zero_sum_result != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			zero_sum_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	/* test incorrect zero sum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	i = IOP_ADMA_NUM_SRC_TEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	memset(pq_sw[i] + 100, 0, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	memset(pq_sw[i+1] + 200, 0, 200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 					 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	zero_sum_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 				      pq_src, IOP_ADMA_NUM_SRC_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 				      raid6_gfexp, PAGE_SIZE, &zero_sum_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 				      DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	cookie = iop_adma_tx_submit(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	iop_adma_issue_pending(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	msleep(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	if (iop_adma_status(dma_chan, cookie, NULL) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			zero_sum_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		goto free_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) free_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	iop_adma_free_chan_resources(dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	i = ARRAY_SIZE(pq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		__free_page(pq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
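/*
 * Undo probe: unregister from dmaengine, release the coherent descriptor
 * pool, and free the per-channel and per-device structures.
 */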
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static int iop_adma_remove(struct platform_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	struct iop_adma_device *device = platform_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	struct dma_chan *chan, *_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct iop_adma_chan *iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	dma_async_device_unregister(&device->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	dma_free_coherent(&dev->dev, plat_data->pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			device->dma_desc_pool_virt, device->dma_desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	list_for_each_entry_safe(chan, _chan, &device->common.channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		iop_chan = to_iop_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		list_del(&chan->device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		kfree(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	kfree(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
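/*
 * Probe one ADMA engine: map its registers, allocate a write-combined
 * descriptor pool, advertise the capabilities supplied by platform data,
 * hook up its interrupts, run the relevant self-tests, and register the
 * single channel with the dmaengine core.
 */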
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int iop_adma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	struct iop_adma_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	struct iop_adma_chan *iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	struct dma_device *dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (!devm_request_mem_region(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 				resource_size(res), pdev->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	if (!adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	dma_dev = &adev->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	/* allocate coherent memory for hardware descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	 * note: writecombine gives slightly better performance, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * requires that we explicitly flush the writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 						plat_data->pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 						&adev->dma_desc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 						GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (!adev->dma_desc_pool_virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		goto err_free_adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		__func__, adev->dma_desc_pool_virt, &adev->dma_desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	adev->id = plat_data->hw_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	/* discover transaction capabilities from the platform data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	dma_dev->cap_mask = plat_data->cap_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	adev->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	platform_set_drvdata(pdev, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	INIT_LIST_HEAD(&dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	/* set base routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	dma_dev->device_tx_status = iop_adma_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	dma_dev->device_issue_pending = iop_adma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	dma_dev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	/* set prep routines based on capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		dma_dev->max_xor = iop_adma_get_max_xor();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		dma_dev->device_prep_dma_xor_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			iop_adma_prep_dma_xor_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		dma_dev->device_prep_dma_pq_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			iop_adma_prep_dma_pq_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		dma_dev->device_prep_dma_interrupt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 			iop_adma_prep_dma_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	if (!iop_chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		goto err_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	iop_chan->device = adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 					resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (!iop_chan->mmr_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		goto err_free_iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	tasklet_setup(&iop_chan->irq_tasklet, iop_adma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	/* clear errors before enabling interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	iop_adma_device_clear_err_status(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
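	/* the three IRQ resources are, in order: end-of-transfer, end-of-chain, error */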
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		static const irq_handler_t handler[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			iop_adma_eot_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			iop_adma_eoc_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			iop_adma_err_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		int irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		if (irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			goto err_free_iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			ret = devm_request_irq(&pdev->dev, irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 					handler[i], 0, pdev->name, iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 				goto err_free_iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	spin_lock_init(&iop_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	INIT_LIST_HEAD(&iop_chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	INIT_LIST_HEAD(&iop_chan->all_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	iop_chan->common.device = dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	dma_cookie_init(&iop_chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		ret = iop_adma_memcpy_self_test(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 			goto err_free_iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		ret = iop_adma_xor_val_self_test(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			goto err_free_iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	    dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		#ifdef CONFIG_RAID6_PQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		ret = iop_adma_pq_zero_sum_self_test(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		#else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		/* cannot test raid6, so do not publish the pq capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			goto err_free_iop_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	dma_async_device_register(dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  err_free_iop_chan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	kfree(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  err_free_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			adev->dma_desc_pool_virt, adev->dma_desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  err_free_adev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	kfree(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
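/*
 * Prime an idle channel with a zero-length memcpy descriptor so the
 * hardware chain has a valid head before real transactions are appended.
 */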
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct iop_adma_desc_slot *sw_desc, *grp_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	int slot_cnt, slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	spin_lock_bh(&iop_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		grp_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		async_tx_ack(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		iop_desc_init_memcpy(grp_start, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		iop_desc_set_memcpy_src_addr(grp_start, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		cookie = dma_cookie_assign(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		/* initialize the completed cookie to be less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		 * the most recently used cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		iop_chan->common.completed_cookie = cookie - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		/* channel should not be busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		BUG_ON(iop_chan_is_busy(iop_chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		/* clear any prior error-status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		iop_adma_device_clear_err_status(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		/* disable operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		iop_chan_disable(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		/* set the descriptor address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		/* 1/ don't add pre-chained descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		 * 2/ dummy read to flush next_desc write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		BUG_ON(iop_desc_get_next_desc(sw_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		/* run the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		iop_chan_enable(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		dev_err(iop_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			"failed to allocate null descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	spin_unlock_bh(&iop_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
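/*
 * Same as iop_chan_start_null_memcpy(), but primes the chain with a
 * two-source zero-length XOR descriptor.
 */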
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct iop_adma_desc_slot *sw_desc, *grp_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	int slot_cnt, slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	spin_lock_bh(&iop_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		grp_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		async_tx_ack(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		iop_desc_init_null_xor(grp_start, 2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		iop_desc_set_byte_count(grp_start, iop_chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		iop_desc_set_xor_src_addr(grp_start, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		iop_desc_set_xor_src_addr(grp_start, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		cookie = dma_cookie_assign(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		/* initialize the completed cookie to be less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		 * the most recently used cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		iop_chan->common.completed_cookie = cookie - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		/* channel should not be busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		BUG_ON(iop_chan_is_busy(iop_chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		/* clear any prior error-status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		iop_adma_device_clear_err_status(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		/* disable operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		iop_chan_disable(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		/* set the descriptor address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		/* 1/ don't add pre-chained descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		 * 2/ dummy read to flush next_desc write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		BUG_ON(iop_desc_get_next_desc(sw_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		/* run the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		iop_chan_enable(iop_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		dev_err(iop_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			"failed to allocate null descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	spin_unlock_bh(&iop_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static struct platform_driver iop_adma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	.probe		= iop_adma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	.remove		= iop_adma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		.name	= "iop-adma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) module_platform_driver(iop_adma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) MODULE_AUTHOR("Intel Corporation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) MODULE_DESCRIPTION("IOP ADMA Engine Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) MODULE_ALIAS("platform:iop-adma");