Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards.

The file shown below is crypto/async_tx/async_pq.c, the asynchronous RAID6 P/Q syndrome generation/validation code from this tree.

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
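/*
 * For example, with disks = 6 (four data blocks plus the two
 * destinations), P(blocks, 6) expands to blocks[4] and Q(blocks, 6)
 * expands to blocks[5].
 */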

#define MAX_DISKS 255

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 * @chan: DMA channel driving the operation
 * @scfs: array of GF(2^8) coefficients, one per mapped source
 * @disks: number of mapped addresses in @unmap, including the two destinations
 * @unmap: DMA-mapped sources followed by the P and Q destinations
 * @dma_flags: flags passed through to device_prep_dma_pq()
 * @submit: submission/completion modifiers
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	dma_async_tx_callback cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

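	/* A single hardware operation can take at most dma_maxpq() sources,
	 * so larger stripes are processed in chunks.  Every chunk after the
	 * first carries DMA_PREP_CONTINUE, which tells the engine to fold
	 * the P/Q it produced previously into the new result.
	 */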
	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Force forward progress in case the driver cannot provide
		 * a descriptor: quiesce any dependency, flush the pending
		 * queue, and retry.
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest)
 * @disks: number of blocks, including the P and Q destinations
 * @len: length of the operation in bytes
 * @submit: submission/completion modifiers
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

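	/* start/stop track the span of present (non-NULL) data blocks so
	 * that the xor_syndrome() path below only has to rewrite P/Q over
	 * that range.
	 */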
	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void*)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offsets[i];

			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

static inline bool
is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
				     int src_cnt, size_t len)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
			return false;
	}
	return true;
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
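 *
 * With n = disks - 2 data blocks D_0..D_(n-1), the two syndromes
 * computed over that field are:
 *
 *   P = D_0 + D_1 + ... + D_(n-1)                        (plain XOR parity)
 *   Q = {02}^0.D_0 + {02}^1.D_1 + ... + {02}^(n-1).D_(n-1)
 *
 * which is why the per-source coefficients below are taken from
 * raid6_gfexp[].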
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software, so take the hardware
	 * path only when ASYNC_TX_PQ_XOR_DST is not requested, the device
	 * can cover all sources (in one shot or via DMA_PREP_CONTINUE
	 * chaining), and every offset meets the device's alignment.
	 */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
						offsets[i], len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							P(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							Q(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		P(offsets, disks) = 0;
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		Q(offsets, disks) = 0;
	}
	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
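
/*
 * Example (a minimal sketch, not part of this file): computing P/Q for a
 * stripe with four data blocks.  'pages' and 'offs' are assumed to be set
 * up by the caller, with the P and Q destination pages in the last two
 * slots:
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	struct page *pages[6];		// D0..D3, then P, then Q
 *	unsigned int offs[6] = { 0 };
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, NULL);
 *	tx = async_gen_syndrome(pages, offs, 6, PAGE_SIZE, &submit);
 */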

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @s_off: spare buffer page offset
 * @submit: submission/completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires a
 * temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offsets[i], len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     P(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     Q(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		unsigned int p_off = P(offsets, disks);
		struct page *q_src = Q(blocks, disks);
		unsigned int q_off = Q(offsets, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor_offs(spare, s_off,
					blocks, offsets, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + p_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

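		/*
		 * To check Q, regenerate the syndrome into the spare page
		 * with P pointed at NULL (so P generation is skipped), then
		 * compare the result against the stored Q.
		 */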
		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			Q(offsets, disks) = s_off;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offsets, disks,
					len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + q_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		P(offsets, disks) = p_off;
		Q(blocks, disks) = q_src;
		Q(offsets, disks) = q_off;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
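
/*
 * Example (a sketch under the same assumptions as the async_gen_syndrome()
 * example above): validating the stripe after it has been written.  The
 * spare page and scribble array are required by the synchronous fallback:
 *
 *	enum sum_check_flags pqres = 0;
 *	addr_conv_t scribble[6];
 *	struct page *spare = alloc_page(GFP_KERNEL);
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *	tx = async_syndrome_val(pages, offs, 6, PAGE_SIZE, &pqres,
 *				spare, 0, &submit);
 *
 * Once the operation completes, pqres == 0 means both P and Q were
 * consistent with the data blocks.
 */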

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");