// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"

#define MAX_SCF	256

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
				       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
					0, 1, 2, 3, 4, 5, 6 };
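/*
 * How the tables above are read: xor_idx_to_desc and pq_idx_to_desc are
 * bitmaps indexed by source number; a set bit selects the extended
 * descriptor (descs[1]), a clear bit the base descriptor (descs[0]).
 * For example, xor_idx_to_desc == 0xe0 (bits 5-7 set) means sources 0-4
 * live in the base xor descriptor and sources 5-7 spill into the
 * extension, while pq_idx_to_desc == 0xf8 puts sources 0-2 in the base
 * pq descriptor and 3-7 in the extension.  The matching *_idx_to_field[]
 * arrays give the raw field slot for that source within whichever
 * descriptor was selected.  The pq16 tables use a plain array rather
 * than a bitmap because three 64-byte blocks are involved: the base
 * descriptor plus the two halves of the super extended (SED) descriptor.
 */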

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	return raw->field[pq16_idx_to_field[idx]];
}

static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
			 dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
	struct ioat_pq16a_descriptor *pq16 =
		(struct ioat_pq16a_descriptor *)desc[1];
	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

	raw->field[pq16_idx_to_field[idx]] = addr + offset;

	if (idx < 8)
		pq->coef[idx] = coef;
	else
		pq16->coef[idx - 8] = coef;
}

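/*
 * Allocate a super extended descriptor (SED) for the 16-source pq path:
 * the software entry comes from ioat_sed_cache and the hardware block
 * from the dma_pool selected by @hw_pool.  GFP_ATOMIC | __GFP_ZERO is
 * used, presumably because the prep paths calling this may not sleep.
 */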
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
{
	struct ioat_sed_ent *sed;
	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

	sed = kmem_cache_alloc(ioat_sed_cache, flags);
	if (!sed)
		return NULL;

	sed->hw_pool = hw_pool;
	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
				 flags, &sed->dma);
	if (!sed->hw) {
		kmem_cache_free(ioat_sed_cache, sed);
		return NULL;
	}

	return sed;
}

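/*
 * Prepare a memcpy transaction.  The copy is split into chunks of at most
 * 1 << xfercap_log bytes, one ring descriptor per chunk; only the last
 * descriptor gets the interrupt, fence and completion-write control bits.
 * The channel lock taken in ioat_check_space_lock() is deliberately left
 * held on return (see the comment at the end) so submission stays in order.
 */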
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat_chan, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat_chan, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

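/*
 * Common helper for xor and xor-validate.  Each transfer chunk uses a base
 * xor descriptor, plus an extended descriptor when more than 5 sources are
 * involved (with_ext doubles the descriptor count).  A trailing null
 * descriptor carries the completion interrupt because raid engine
 * completion writes may pass legacy engine ones (see below).
 */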
static struct dma_async_tx_descriptor *
__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		     dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		     size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t,
					 len, 1 << ioat_chan->xfercap_log);
		int s;

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; xor_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat_chan, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat_chan, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
				    src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
		 struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(ioat_chan);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
		pq->ctl_f.int_en, pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}

static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
			       struct ioat_ring_ent *desc)
{
	struct device *dev = to_dev(ioat_chan);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_raw_descriptor *descs[] = { (void *)pq,
						(void *)pq,
						(void *)pq };
	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	if (desc->sed) {
		descs[1] = (void *)desc->sed->hw;
		descs[2] = (void *)desc->sed->hw + 64;
	}

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
		" src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) pq->next,
		desc->txd.flags, pq->size, pq->ctl,
		pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++) {
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq16_get_src(descs, i),
			pq->coef[i]);
	}
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

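/*
 * Common helper for pq and pq-validate with up to 8 sources.  The base pq
 * descriptor has 3 source slots; beyond that (counting the implied sources
 * added for RAID6 continuation) each chunk also consumes a pq extended
 * descriptor.  On hardware older than cb3.3 (cb32) a trailing null
 * descriptor performs the completion write; newer hardware writes
 * completion status directly from the pq descriptor.
 */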
static struct dma_async_tx_descriptor *
__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		    const dma_addr_t *dst, const dma_addr_t *src,
		    unsigned int src_cnt, const unsigned char *scf,
		    size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;
	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;

	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case)
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len,
					 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; pq_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		/* we turn on descriptor write back error status */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat_chan, desc, ext);

	if (!cb32) {
		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		pq->ctl_f.compl_write = 1;
		compl_desc = desc;
	} else {
		/* completion descriptor carries interrupt bit */
		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
		hw = compl_desc->hw;
		hw->ctl = 0;
		hw->ctl_f.null = 1;
		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
		hw->ctl_f.compl_write = 1;
		hw->size = NULL_DESC_BUFFER_SIZE;
		dump_desc_dbg(ioat_chan, compl_desc);
	}

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}

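/*
 * 9-16 source pq helper (cb3.3 and later).  Each chunk pairs the base pq
 * descriptor with a super extended descriptor (SED) allocated per chunk;
 * the SED hardware block is addressed as two further 64-byte raw
 * descriptors holding source slots 2-15.  No trailing null descriptor is
 * needed since this hardware lacks the completion write bug worked around
 * in __ioat_prep_pq_lock().
 */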
static struct dma_async_tx_descriptor *
__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
		      const dma_addr_t *dst, const dma_addr_t *src,
		      unsigned int src_cnt, const unsigned char *scf,
		      size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	u32 offset = 0;
	u8 op;
	int i, s, idx, num_descs;

	/* this function is only called with 9-16 sources */
	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);

	/*
	 * 16 source pq is only available on cb3.3 and has no completion
	 * write hw bug.
	 */
	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;

	i = 0;

	do {
		struct ioat_raw_descriptor *descs[4];
		size_t xfer_size = min_t(size_t, len,
					 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		pq = desc->pq;

		descs[0] = (struct ioat_raw_descriptor *) pq;

		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
		if (!desc->sed) {
			dev_err(to_dev(ioat_chan),
				"%s: no free sed entries\n", __func__);
			return NULL;
		}

		pq->sed_addr = desc->sed->dma;
		desc->sed->parent = desc;

		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
		descs[2] = (void *)descs[1] + 64;

		for (s = 0; s < src_cnt; s++)
			pq16_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq16_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq16_set_src(descs, dst[0], offset, 0, s++);
			pq16_set_src(descs, dst[1], offset, 1, s++);
			pq16_set_src(descs, dst[1], offset, 0, s++);
		}

		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
		/* we turn on descriptor write back error status */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			pq->ctl_f.wb_en = result ? 1 : 0;
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while (++i < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* with cb3.3 we should be able to do completion w/o a null desc */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;

	dump_pq16_desc_dbg(ioat_chan, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

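/*
 * Effective source count once the implied RAID6 continuation sources are
 * added: a q-only continuation (dmaf_p_disabled_continue) feeds the prior
 * q back in as one extra source, while a full p+q continuation adds three
 * (p, q, and q again with a zero coefficient), matching the implied
 * pq_set_src() calls above.  The result decides whether the 8-source or
 * the 16-source descriptor format is used.
 */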
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
	if (dmaf_p_disabled_continue(flags))
		return src_cnt + 1;
	else if (dmaf_continue(flags))
		return src_cnt + 3;
	else
		return src_cnt;
}

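/*
 * Top-level pq prep.  Disabled results are pointed at the other (valid)
 * destination address, the single-source multiply used by the raid6
 * recovery path is padded out to the two sources the engine requires, and
 * the request is routed to the 8- or 16-source helper based on
 * src_cnt_flags().
 */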
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat_prep_pq16_lock(chan, NULL, dst, single_source,
					      2, single_source_coef, len,
					      flags) :
			__ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);

	} else {
		return src_cnt_flags(src_cnt, flags) > 8 ?
			__ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
					      scf, len, flags) :
			__ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
					    scf, len, flags);
	}
}

struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
				      flags) :
		__ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}

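/*
 * xor offload built on the pq engine: q is disabled, all coefficients are
 * zeroed, and both p and the (disabled) q address point at the single
 * destination, so the engine's p output is the xor of the sources.
 * Limited to MAX_SCF sources by the on-stack coefficient array.
 */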
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[MAX_SCF];
	dma_addr_t pq[2];
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	if (src_cnt > MAX_SCF)
		return NULL;

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
				      flags) :
		__ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[MAX_SCF];
	dma_addr_t pq[2];
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	if (src_cnt > MAX_SCF)
		return NULL;

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return src_cnt_flags(src_cnt, flags) > 8 ?
		__ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
				      scf, len, flags) :
		__ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
				    scf, len, flags);
}

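/*
 * Prepare a pure interrupt operation: a single null descriptor with no
 * source or destination whose only effect is the completion write and
 * interrupt.
 */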
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		return NULL;

	if (ioat_check_space_lock(ioat_chan, 1) == 0)
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat_chan, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}