// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006-2009 DENX Software Engineering.
 *
 * Author: Yuri Tikhonov <yur@emcraft.com>
 *
 * Further porting to arch/powerpc by
 * 	Anatolij Gustschin <agust@denx.de>
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the AMCC PPC440SPe processors.
 * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 * ADMA driver written by D. Williams.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
#include "../dmaengine.h"

enum ppc_adma_init_code {
	PPC_ADMA_INIT_OK = 0,
	PPC_ADMA_INIT_MEMRES,
	PPC_ADMA_INIT_MEMREG,
	PPC_ADMA_INIT_ALLOC,
	PPC_ADMA_INIT_COHERENT,
	PPC_ADMA_INIT_CHANNEL,
	PPC_ADMA_INIT_IRQ1,
	PPC_ADMA_INIT_IRQ2,
	PPC_ADMA_INIT_REGISTER
};

static char *ppc_adma_errors[] = {
	[PPC_ADMA_INIT_OK] = "ok",
	[PPC_ADMA_INIT_MEMRES] = "failed to get memory resource",
	[PPC_ADMA_INIT_MEMREG] = "failed to request memory region",
	[PPC_ADMA_INIT_ALLOC] = "failed to allocate memory for adev "
				"structure",
	[PPC_ADMA_INIT_COHERENT] = "failed to allocate coherent memory for "
				   "hardware descriptors",
	[PPC_ADMA_INIT_CHANNEL] = "failed to allocate memory for channel",
	[PPC_ADMA_INIT_IRQ1] = "failed to request first irq",
	[PPC_ADMA_INIT_IRQ2] = "failed to request second irq",
	[PPC_ADMA_INIT_REGISTER] = "failed to register dma async device",
};
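
/*
 * Illustrative sketch, not part of the original driver: a probe error
 * path could pair an init code with its message from the table above.
 * The helper name and its arguments are hypothetical.
 */
static inline void ppc_adma_example_report_init_error(int devnum,
					enum ppc_adma_init_code code)
{
	/* code indexes ppc_adma_errors[]; devnum identifies the engine */
	pr_err("PPC440SPE-ADMA: dev %d init failed: %s\n", devnum,
	       ppc_adma_errors[code]);
}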

static enum ppc_adma_init_code
ppc440spe_adma_devices[PPC440SPE_ADMA_ENGINES_NUM];

struct ppc_dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
};

/* The list of channels exported by ppc440spe ADMA */
struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);

/* This flag is set when we want to refetch the xor chain in the
 * interrupt handler
 */
static u32 do_xor_refetch;

/* Pointer to DMA0, DMA1 CP/CS FIFO */
static void *ppc440spe_dma_fifo_buf;

/* Pointers to the CDBs last submitted to DMA0, DMA1 */
static struct ppc440spe_adma_desc_slot *chan_last_sub[3];
static struct ppc440spe_adma_desc_slot *chan_first_cdb[3];

/* Pointer to last linked and submitted xor CB */
static struct ppc440spe_adma_desc_slot *xor_last_linked;
static struct ppc440spe_adma_desc_slot *xor_last_submit;

/* This array is used in data-check operations for storing a pattern */
static char ppc440spe_qword[16];

static atomic_t ppc440spe_adma_err_irq_ref;
static dcr_host_t ppc440spe_mq_dcr_host;
static unsigned int ppc440spe_mq_dcr_len;

/* Since RXOR operations use the common register (MQ0_CF2H) for setting
 * up the block size of transactions, we do not allow more than one RXOR
 * transaction to be active at a time. This variable tracks whether RXOR
 * is currently active (the PPC440SPE_RXOR_RUN bit is set) or not
 * (PPC440SPE_RXOR_RUN is clear).
 */
static unsigned long ppc440spe_rxor_state;
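
/*
 * Illustrative sketch, not part of the original driver: the "only one
 * RXOR at a time" rule described above maps directly onto the atomic
 * bitops API. A prep routine could gate RXOR selection as below; the
 * helper is hypothetical, PPC440SPE_RXOR_RUN comes from adma.h.
 */
static inline int ppc440spe_example_try_start_rxor(void)
{
	/* non-zero iff we won the right to program MQ0_CF2H */
	return !test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state);
}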

/* These are used in enable & check routines */
static u32 ppc440spe_r6_enabled;
static struct ppc440spe_adma_chan *ppc440spe_r6_tchan;
static struct completion ppc440spe_r6_test_comp;

static int ppc440spe_adma_dma2rxor_prep_src(
		struct ppc440spe_adma_desc_slot *desc,
		struct ppc440spe_rxor *cursor, int index,
		int src_cnt, u32 addr);
static void ppc440spe_adma_dma2rxor_set_src(
		struct ppc440spe_adma_desc_slot *desc,
		int index, dma_addr_t addr);
static void ppc440spe_adma_dma2rxor_set_mult(
		struct ppc440spe_adma_desc_slot *desc,
		int index, u8 mult);

#ifdef ADMA_LL_DEBUG
#define ADMA_LL_DBG(x) ({ if (1) x; 0; })
#else
#define ADMA_LL_DBG(x) ({ if (0) x; 0; })
#endif
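
/*
 * Usage sketch (hypothetical call site): the disabled variant still
 * compiles its argument under "if (0)", so a call such as
 *
 *	ADMA_LL_DBG(print_cb_list(chan, desc));
 *
 * stays type-checked in every configuration but emits no code unless
 * ADMA_LL_DEBUG is defined.
 */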

static void print_cb(struct ppc440spe_adma_chan *chan, void *block)
{
	struct dma_cdb *cdb;
	struct xor_cb *cb;
	int i;

	switch (chan->device->id) {
	case 0:
	case 1:
		cdb = block;

		pr_debug("CDB at %p [%d]:\n"
			 "\t attr 0x%02x opc 0x%02x cnt 0x%08x\n"
			 "\t sg1u 0x%08x sg1l 0x%08x\n"
			 "\t sg2u 0x%08x sg2l 0x%08x\n"
			 "\t sg3u 0x%08x sg3l 0x%08x\n",
			 cdb, chan->device->id,
			 cdb->attr, cdb->opc, le32_to_cpu(cdb->cnt),
			 le32_to_cpu(cdb->sg1u), le32_to_cpu(cdb->sg1l),
			 le32_to_cpu(cdb->sg2u), le32_to_cpu(cdb->sg2l),
			 le32_to_cpu(cdb->sg3u), le32_to_cpu(cdb->sg3l)
		);
		break;
	case 2:
		cb = block;

		pr_debug("CB at %p [%d]:\n"
			 "\t cbc 0x%08x cbbc 0x%08x cbs 0x%08x\n"
			 "\t cbtah 0x%08x cbtal 0x%08x\n"
			 "\t cblah 0x%08x cblal 0x%08x\n",
			 cb, chan->device->id,
			 cb->cbc, cb->cbbc, cb->cbs,
			 cb->cbtah, cb->cbtal,
			 cb->cblah, cb->cblal);
		for (i = 0; i < 16; i++) {
			if (i && !cb->ops[i].h && !cb->ops[i].l)
				continue;
			pr_debug("\t ops[%2d]: h 0x%08x l 0x%08x\n",
				 i, cb->ops[i].h, cb->ops[i].l);
		}
		break;
	}
}

static void print_cb_list(struct ppc440spe_adma_chan *chan,
			  struct ppc440spe_adma_desc_slot *iter)
{
	for (; iter; iter = iter->hw_next)
		print_cb(chan, iter->hw_desc);
}

static void prep_dma_xor_dbg(int id, dma_addr_t dst, dma_addr_t *src,
			     unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst:\n\t0x%016llx\n", dst);
}

static void prep_dma_pq_dbg(int id, dma_addr_t *dst, dma_addr_t *src,
			    unsigned int src_cnt)
{
	int i;

	pr_debug("\n%s(%d):\nsrc: ", __func__, id);
	for (i = 0; i < src_cnt; i++)
		pr_debug("\t0x%016llx ", src[i]);
	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", dst[i]);
}

static void prep_dma_pqzero_sum_dbg(int id, dma_addr_t *src,
				    unsigned int src_cnt,
				    const unsigned char *scf)
{
	int i;

	pr_debug("\n%s(%d):\nsrc(coef): ", __func__, id);
	if (scf) {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(0x%02x) ", src[i], scf[i]);
	} else {
		for (i = 0; i < src_cnt; i++)
			pr_debug("\t0x%016llx(no) ", src[i]);
	}

	pr_debug("dst: ");
	for (i = 0; i < 2; i++)
		pr_debug("\t0x%016llx ", src[src_cnt + i]);
}

/******************************************************************************
 * Command (Descriptor) Blocks low-level routines
 ******************************************************************************/
/**
 * ppc440spe_desc_init_interrupt - initialize the descriptor for INTERRUPT
 * pseudo operation
 */
static void ppc440spe_desc_init_interrupt(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	struct xor_cb *p;

	switch (chan->device->id) {
	case PPC440SPE_XOR_ID:
		p = desc->hw_desc;
		memset(desc->hw_desc, 0, sizeof(struct xor_cb));
		/* NOP with Command Block Complete Enable */
		p->cbc = XOR_CBCR_CBCE_BIT;
		break;
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
		/* NOP with interrupt */
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
		break;
	default:
		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
		       __func__);
		break;
	}
}

/**
 * ppc440spe_desc_init_null_xor - initialize the descriptor for NULL XOR
 * pseudo operation
 */
static void ppc440spe_desc_init_null_xor(struct ppc440spe_adma_desc_slot *desc)
{
	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = 0;
	desc->dst_cnt = 1;
}

/**
 * ppc440spe_desc_init_xor - initialize the descriptor for XOR operation
 */
static void ppc440spe_desc_init_xor(struct ppc440spe_adma_desc_slot *desc,
				    int src_cnt, unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = 1;

	hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

/**
 * ppc440spe_desc_init_dma2pq - initialize the descriptor for PQ
 * operation in DMA2 controller
 */
static void ppc440spe_desc_init_dma2pq(struct ppc440spe_adma_desc_slot *desc,
				       int dst_cnt, int src_cnt,
				       unsigned long flags)
{
	struct xor_cb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct xor_cb));
	desc->hw_next = NULL;
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
	memset(desc->reverse_flags, 0, sizeof(desc->reverse_flags));
	desc->descs_per_op = 0;

	hw_desc->cbc = XOR_CBCR_TGT_BIT;
	if (flags & DMA_PREP_INTERRUPT)
		/* Enable interrupt on completion */
		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
}

#define DMA_CTRL_FLAGS_LAST	DMA_PREP_FENCE
#define DMA_PREP_ZERO_P		(DMA_CTRL_FLAGS_LAST << 1)
#define DMA_PREP_ZERO_Q		(DMA_PREP_ZERO_P << 1)
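
/*
 * Illustrative sketch, not part of the original driver: the two private
 * flags above extend the public dma_ctrl_flags space just past
 * DMA_PREP_FENCE, so "zero P/Q before use" requests travel in the same
 * flags word as the generic dmaengine flags. The helper is hypothetical.
 */
static inline unsigned long ppc440spe_example_pq_flags(unsigned long flags,
						       int zero_p, int zero_q)
{
	if (zero_p)
		flags |= DMA_PREP_ZERO_P;
	if (zero_q)
		flags |= DMA_PREP_ZERO_Q;
	return flags;
}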

/**
 * ppc440spe_desc_init_dma01pq - initialize the descriptors for PQ operation
 * with DMA0/1
 */
static void ppc440spe_desc_init_dma01pq(struct ppc440spe_adma_desc_slot *desc,
					int dst_cnt, int src_cnt,
					unsigned long flags, unsigned long op)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	u8 dopc;

	/* Common initialization of a PQ descriptors chain */
	set_bits(op, &desc->flags);
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;

	/* WXOR MULTICAST if both P and Q are being computed
	 * MV_SG1_SG2 if Q only
	 */
	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;

	list_for_each_entry(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));

		if (likely(!list_is_last(&iter->chain_node,
				&desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
				struct ppc440spe_adma_desc_slot, chain_node);
			clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		} else {
			/* this is the last descriptor.
			 * this slot will be pasted from ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			if (flags & DMA_PREP_INTERRUPT)
				set_bit(PPC440SPE_DESC_INT, &iter->flags);
			else
				clear_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}

	/* Set OPS depending on WXOR/RXOR type of operation */
	if (!test_bit(PPC440SPE_DESC_RXOR, &desc->flags)) {
		/* This is a WXOR-only chain:
		 * - the first descriptors zero the destinations
		 *   if PPC440SPE_ZERO_P/Q is set;
		 * - the remaining descriptors are for GF-XOR operations.
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);

		if (test_bit(PPC440SPE_ZERO_P, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		if (test_bit(PPC440SPE_ZERO_Q, &desc->flags)) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
			iter = list_first_entry(&iter->chain_node,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		}

		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
			hw_desc = iter->hw_desc;
			hw_desc->opc = dopc;
		}
	} else {
		/* This is either RXOR-only or mixed RXOR/WXOR */

		/* The first 1 or 2 slots in the chain are always RXOR:
		 * if we need to calculate both P and Q, there are two
		 * RXOR slots; if only P or only Q, there is one
		 */
		iter = list_first_entry(&desc->group_list,
					struct ppc440spe_adma_desc_slot,
					chain_node);
		hw_desc = iter->hw_desc;
		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;

		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
			iter = list_first_entry(&iter->chain_node,
						struct ppc440spe_adma_desc_slot,
						chain_node);
			hw_desc = iter->hw_desc;
			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
		}

		/* The remaining descs (if any) are WXORs */
		if (test_bit(PPC440SPE_DESC_WXOR, &desc->flags)) {
			iter = list_first_entry(&iter->chain_node,
						struct ppc440spe_adma_desc_slot,
						chain_node);
			list_for_each_entry_from(iter, &desc->group_list,
						 chain_node) {
				hw_desc = iter->hw_desc;
				hw_desc->opc = dopc;
			}
		}
	}
}
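
/*
 * Worked example (illustrative only, assuming group_list holds one CDB
 * per zeroing step plus one per source): a WXOR-only P+Q over 4 sources
 * with both DMA_PREP_ZERO_P and DMA_PREP_ZERO_Q set yields 2 + 4 CDBs:
 *
 *	MV_SG1_SG2 (zero P) -> MV_SG1_SG2 (zero Q) ->
 *	4 x MULTICAST (one GF-XOR accumulation per source)
 *
 * and only the last CDB carries PPC440SPE_DESC_INT when
 * DMA_PREP_INTERRUPT is requested.
 */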

/**
 * ppc440spe_desc_init_dma01pqzero_sum - initialize the descriptor
 * for PQ_ZERO_SUM operation
 */
static void ppc440spe_desc_init_dma01pqzero_sum(
				struct ppc440spe_adma_desc_slot *desc,
				int dst_cnt, int src_cnt)
{
	struct dma_cdb *hw_desc;
	struct ppc440spe_adma_desc_slot *iter;
	int i = 0;
	u8 dopc = (dst_cnt == 2) ? DMA_CDB_OPC_MULTICAST :
				   DMA_CDB_OPC_MV_SG1_SG2;
	/*
	 * Initialize starting from the 2nd or 3rd descriptor, depending
	 * on dst_cnt. The first one or two slots are for cloning P
	 * and/or Q to chan->pdest and/or chan->qdest, as we have
	 * to preserve the original P/Q.
	 */
	iter = list_first_entry(&desc->group_list,
				struct ppc440spe_adma_desc_slot, chain_node);
	iter = list_entry(iter->chain_node.next,
			  struct ppc440spe_adma_desc_slot, chain_node);

	if (dst_cnt > 1) {
		iter = list_entry(iter->chain_node.next,
				  struct ppc440spe_adma_desc_slot, chain_node);
	}
	/* initialize each source descriptor in chain */
	list_for_each_entry_from(iter, &desc->group_list, chain_node) {
		hw_desc = iter->hw_desc;
		memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
		iter->src_cnt = 0;
		iter->dst_cnt = 0;

		/* This is a ZERO_SUM operation:
		 * - <src_cnt> descriptors starting from 2nd or 3rd
		 *   descriptor are for GF-XOR operations;
		 * - remaining <dst_cnt> descriptors are for checking the result
		 */
		if (i++ < src_cnt)
			/* MV_SG1_SG2 if only Q is being verified
			 * MULTICAST if both P and Q are being verified
			 */
			hw_desc->opc = dopc;
		else
			/* DMA_CDB_OPC_DCHECK128 operation */
			hw_desc->opc = DMA_CDB_OPC_DCHECK128;

		if (likely(!list_is_last(&iter->chain_node,
					 &desc->group_list))) {
			/* set 'next' pointer */
			iter->hw_next = list_entry(iter->chain_node.next,
						struct ppc440spe_adma_desc_slot,
						chain_node);
		} else {
			/* this is the last descriptor.
			 * this slot will be pasted from ADMA level
			 * each time it wants to configure parameters
			 * of the transaction (src, dst, ...)
			 */
			iter->hw_next = NULL;
			/* always enable interrupt generation since we get
			 * the status of pqzero from the handler
			 */
			set_bit(PPC440SPE_DESC_INT, &iter->flags);
		}
	}
	desc->src_cnt = src_cnt;
	desc->dst_cnt = dst_cnt;
}

/**
 * ppc440spe_desc_init_memcpy - initialize the descriptor for MEMCPY operation
 */
static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
				       unsigned long flags)
{
	struct dma_cdb *hw_desc = desc->hw_desc;

	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
	desc->hw_next = NULL;
	desc->src_cnt = 1;
	desc->dst_cnt = 1;

	if (flags & DMA_PREP_INTERRUPT)
		set_bit(PPC440SPE_DESC_INT, &desc->flags);
	else
		clear_bit(PPC440SPE_DESC_INT, &desc->flags);

	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
}

/**
 * ppc440spe_desc_set_src_addr - set source address into the descriptor
 */
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
					struct ppc440spe_adma_chan *chan,
					int src_idx, dma_addr_t addrh,
					dma_addr_t addrl)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmplow, tmphi;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
		dma_hw_desc->sg1u |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->ops[src_idx].l = addrl;
		xor_hw_desc->ops[src_idx].h |= addrh;
		break;
	}
}
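
/*
 * Worked example (illustrative only): a 36-bit source at bus address
 * 0x1_2345_6780 passed with addrh == 0 splits into tmphi = 0x1 and
 * tmplow = 0x23456780, landing in sg1u/sg1l. Note that sg1u is OR-ed
 * rather than assigned, so an upper-half value written earlier (e.g.
 * an RXOR multiplier) survives the address update.
 */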

/**
 * ppc440spe_desc_set_src_mult - set source address mult into the descriptor
 */
static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
			struct ppc440spe_adma_chan *chan, u32 mult_index,
			int sg_index, unsigned char mult_value)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	u32 *psgu;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;

		switch (sg_index) {
		/* for RXOR operations set multiplier
		 * into source cued address
		 */
		case DMA_CDB_SG_SRC:
			psgu = &dma_hw_desc->sg1u;
			break;
		/* for WXOR operations set multiplier
		 * into destination cued address(es)
		 */
		case DMA_CDB_SG_DST1:
			psgu = &dma_hw_desc->sg2u;
			break;
		case DMA_CDB_SG_DST2:
			psgu = &dma_hw_desc->sg3u;
			break;
		default:
			BUG();
		}

		*psgu |= cpu_to_le32(mult_value << mult_index);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_desc_set_dest_addr - set destination address into the descriptor
 */
static void ppc440spe_desc_set_dest_addr(struct ppc440spe_adma_desc_slot *desc,
					 struct ppc440spe_adma_chan *chan,
					 dma_addr_t addrh, dma_addr_t addrl,
					 u32 dst_idx)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;
	phys_addr_t addr64, tmphi, tmplow;
	u32 *psgu, *psgl;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		if (!addrh) {
			addr64 = addrl;
			tmphi = (addr64 >> 32);
			tmplow = (addr64 & 0xFFFFFFFF);
		} else {
			tmphi = addrh;
			tmplow = addrl;
		}
		dma_hw_desc = desc->hw_desc;

		psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
		psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;

		*psgl = cpu_to_le32((u32)tmplow);
		*psgu |= cpu_to_le32((u32)tmphi);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbtal = addrl;
		xor_hw_desc->cbtah |= addrh;
		break;
	}
}

/**
 * ppc440spe_desc_set_byte_count - set the number of data bytes involved
 * in the operation
 */
static void ppc440spe_desc_set_byte_count(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan,
					  u32 byte_count)
{
	struct dma_cdb *dma_hw_desc;
	struct xor_cb *xor_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		dma_hw_desc->cnt = cpu_to_le32(byte_count);
		break;
	case PPC440SPE_XOR_ID:
		xor_hw_desc = desc->hw_desc;
		xor_hw_desc->cbbc = byte_count;
		break;
	}
}

/**
 * ppc440spe_desc_set_rxor_block_size - set RXOR block size
 */
static inline void ppc440spe_desc_set_rxor_block_size(u32 byte_count)
{
	/* assume that byte_count is aligned on a 512-byte boundary;
	 * thus write it directly to the register (bits 23:31 are
	 * reserved there).
	 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}
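
/*
 * Illustrative sketch, not part of the original driver: a defensive
 * variant of the setter above could assert the 512-byte alignment its
 * comment relies on. The helper is hypothetical.
 */
static inline void ppc440spe_example_set_rxor_block_size(u32 byte_count)
{
	WARN_ON(byte_count & 511);	/* must be a multiple of 512 */
	dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CF2H, byte_count);
}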

/**
 * ppc440spe_desc_set_dcheck - set CHECK pattern
 */
static void ppc440spe_desc_set_dcheck(struct ppc440spe_adma_desc_slot *desc,
				      struct ppc440spe_adma_chan *chan,
				      u8 *qword)
{
	struct dma_cdb *dma_hw_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		dma_hw_desc = desc->hw_desc;
		iowrite32(qword[0], &dma_hw_desc->sg3l);
		iowrite32(qword[4], &dma_hw_desc->sg3u);
		iowrite32(qword[8], &dma_hw_desc->sg2l);
		iowrite32(qword[12], &dma_hw_desc->sg2u);
		break;
	default:
		BUG();
	}
}

/**
 * ppc440spe_xor_set_link - set link address in xor CB
 */
static void ppc440spe_xor_set_link(struct ppc440spe_adma_desc_slot *prev_desc,
				   struct ppc440spe_adma_desc_slot *next_desc)
{
	struct xor_cb *xor_hw_desc = prev_desc->hw_desc;

	if (unlikely(!next_desc || !(next_desc->phys))) {
		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
		       __func__, next_desc,
		       next_desc ? next_desc->phys : 0);
		BUG();
	}

	xor_hw_desc->cbs = 0;
	xor_hw_desc->cblal = next_desc->phys;
	xor_hw_desc->cblah = 0;
	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
}

/**
 * ppc440spe_desc_set_link - set the address of descriptor following this
 * descriptor in chain
 */
static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
				    struct ppc440spe_adma_desc_slot *prev_desc,
				    struct ppc440spe_adma_desc_slot *next_desc)
{
	unsigned long flags;
	struct ppc440spe_adma_desc_slot *tail = next_desc;

	if (unlikely(!prev_desc || !next_desc ||
		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
		/* If the previous 'next' is being overwritten, something is
		 * wrong; though we may refetch from append to initiate list
		 * processing, in which case it is OK.
		 */
		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
		       "prev->hw_next=0x%p\n", __func__, prev_desc,
		       next_desc, prev_desc ? prev_desc->hw_next : 0);
		BUG();
	}

	local_irq_save(flags);

	/* do s/w chaining both for DMA and XOR descriptors */
	prev_desc->hw_next = next_desc;

	switch (chan->device->id) {
	case PPC440SPE_DMA0_ID:
	case PPC440SPE_DMA1_ID:
		break;
	case PPC440SPE_XOR_ID:
		/* bind descriptor to the chain */
		while (tail->hw_next)
			tail = tail->hw_next;
		xor_last_linked = tail;

		if (prev_desc == xor_last_submit)
			/* do not link to the last submitted CB */
			break;
		ppc440spe_xor_set_link(prev_desc, next_desc);
		break;
	}

	local_irq_restore(flags);
}

/**
 * ppc440spe_desc_get_link - get the address of the descriptor that
 * follows this one
 */
static inline u32 ppc440spe_desc_get_link(struct ppc440spe_adma_desc_slot *desc,
					  struct ppc440spe_adma_chan *chan)
{
	if (!desc->hw_next)
		return 0;

	return desc->hw_next->phys;
}

/**
 * ppc440spe_desc_is_aligned - check alignment
 */
static inline int ppc440spe_desc_is_aligned(
	struct ppc440spe_adma_desc_slot *desc, int num_slots)
{
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}
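
/*
 * Worked example (illustrative only): with num_slots == 4 the mask is
 * 0x3, so only slots at idx 0, 4, 8, ... count as aligned and may start
 * a multi-slot group; the mask trick assumes num_slots is a power of two.
 */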

/**
 * ppc440spe_chan_xor_slot_count - get the number of slots necessary for
 * XOR operation
 */
static int ppc440spe_chan_xor_slot_count(size_t len, int src_cnt,
					 int *slots_per_op)
{
	int slot_cnt;

	/* each XOR descriptor provides up to 16 source operands */
	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (likely(len <= PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
	printk(KERN_ERR "%s: len %zu > max %d !!\n",
		__func__, len, PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * ppc440spe_dma2_pq_slot_count - get the number of slots necessary for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * DMA2 PQ operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) int src_cnt, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) signed long long order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) int state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int addr_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) int i;
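	/*
	 * The loop below is a small state machine over consecutive
	 * source addresses: state 0 looks for the start of a
	 * contiguous run (direct RXOR when srcs[i] == srcs[i-1] + len,
	 * reverse RXOR when srcs[i-1] == srcs[i] + len); state 1
	 * checks whether the run continues with one of the supported
	 * +1*len/+2*len/+3*len strides; state 2 closes the current
	 * region; state 3 aborts the scan. E.g. the sources
	 * {B, B + len, B + 2*len} form one direct-RXOR region and
	 * contribute a single entry to addr_count.
	 */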
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) for (i = 1; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) dma_addr_t cur_addr = srcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) dma_addr_t old_addr = srcs[i-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (cur_addr == old_addr + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* direct RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) order = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (i == src_cnt-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) addr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) } else if (old_addr == cur_addr + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* reverse RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) order = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (i == src_cnt-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) addr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) state = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (i == src_cnt-2 || (order == -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) && cur_addr != old_addr - len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) addr_count++;
			} else if (cur_addr == old_addr + len*order ||
				   cur_addr == old_addr + 2*len ||
				   cur_addr == old_addr + 3*len) {
				state = 2;
				if (i == src_cnt-1)
					addr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) addr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) addr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (state == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (src_cnt <= 1 || (state != 1 && state != 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) __func__, src_cnt, state, addr_count, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) for (i = 0; i < src_cnt; i++)
			pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
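	/* each XOR command block takes up to XOR_MAX_OPS region
	 * operands, so round the region count up to whole CBs
	 */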
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * ADMA channel low-level routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ******************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * ppc440spe_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static void ppc440spe_adma_device_clear_eot_status(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct dma_regs *dma_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct xor_regs *xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) u8 *p = chan->device->dma_desc_pool_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct dma_cdb *cdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) u32 rv, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) /* read FIFO to ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dma_reg = chan->device->dma_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) while ((rv = ioread32(&dma_reg->csfpl))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) i = rv & DMA_CDB_ADDR_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) cdb = (struct dma_cdb *)&p[i -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) (u32)chan->device->dma_desc_pool];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* Clear opcode to ack. This is necessary for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * ZeroSum operations only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) cdb->opc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (test_bit(PPC440SPE_RXOR_RUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) &ppc440spe_rxor_state)) {
				/* this is probably a completed RXOR op;
				 * get a pointer to the CDB using the
				 * fact that the physical and virtual
				 * addresses of a CDB in the pools have
				 * the same offsets
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (le32_to_cpu(cdb->sg1u) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) DMA_CUED_XOR_BASE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* this is a RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) clear_bit(PPC440SPE_RXOR_RUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) &ppc440spe_rxor_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (rv & DMA_CDB_STATUS_MSK) {
				/* ZeroSum check failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct ppc440spe_adma_desc_slot *iter;
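				/* the FIFO word carries completion
				 * status in its low bits; masking
				 * them off recovers the CDB's
				 * physical address
				 */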
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) dma_addr_t phys = rv & ~DMA_CDB_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * Update the status of corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) list_for_each_entry(iter, &chan->chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (iter->phys == phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
				/*
				 * if we cannot find the corresponding
				 * slot, it's a bug
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) BUG_ON(&iter->chain_node == &chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (iter->xor_check_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (test_bit(PPC440SPE_DESC_PCHECK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) &iter->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) *iter->xor_check_result |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) SUM_CHECK_P_RESULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (test_bit(PPC440SPE_DESC_QCHECK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) &iter->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *iter->xor_check_result |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) SUM_CHECK_Q_RESULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rv = ioread32(&dma_reg->dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) pr_err("DMA%d err status: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) chan->device->id, rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* write back to clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) iowrite32(rv, &dma_reg->dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* reset status bits to ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) xor_reg = chan->device->xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) rv = ioread32be(&xor_reg->sr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) iowrite32be(rv, &xor_reg->sr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (rv & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (rv & XOR_IE_RPTIE_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* Read PLB Timeout Error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * Try to resubmit the CB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) u32 val = ioread32be(&xor_reg->ccbalr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) iowrite32be(val, &xor_reg->cblalr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) val = ioread32be(&xor_reg->crsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) iowrite32be(val | XOR_CRSR_XAE_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) &xor_reg->crsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) } else
				pr_err("XOR error status: 0x%x\n", rv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
		/* if the XORcore is idle but there are unprocessed CBs,
		 * then refetch the s/w chain here
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) do_xor_refetch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ppc440spe_chan_append(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * ppc440spe_chan_is_busy - get the channel status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static int ppc440spe_chan_is_busy(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct dma_regs *dma_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct xor_regs *xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) int busy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dma_reg = chan->device->dma_reg;
		/* if the command FIFO's head and tail pointers are equal
		 * and the status FIFO's tail matches the command FIFO's
		 * tail, then the channel is free
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (ioread16(&dma_reg->cpfhp) != ioread16(&dma_reg->cpftp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ioread16(&dma_reg->cpftp) != ioread16(&dma_reg->csftp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) busy = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /* use the special status bit for the XORcore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) xor_reg = chan->device->xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) busy = (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * ppc440spe_chan_set_first_xor_descriptor - init XORcore chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static void ppc440spe_chan_set_first_xor_descriptor(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct ppc440spe_adma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct ppc440spe_adma_desc_slot *next_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct xor_regs *xor_reg = chan->device->xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)
		printk(KERN_INFO "%s: Warn: XORcore is running "
			"when trying to set the first CDB!\n",
			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) xor_last_submit = xor_last_linked = next_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) iowrite32be(XOR_CRSR_64BA_BIT, &xor_reg->crsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) iowrite32be(next_desc->phys, &xor_reg->cblalr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) iowrite32be(0, &xor_reg->cblahr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) iowrite32be(ioread32be(&xor_reg->cbcr) | XOR_CBCR_LNK_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) &xor_reg->cbcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) chan->hw_chain_inited = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /**
 * ppc440spe_dma_put_desc - put a DMA0/1 descriptor into the FIFO.
 * Called with IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static void ppc440spe_dma_put_desc(struct ppc440spe_adma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct ppc440spe_adma_desc_slot *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) u32 pcdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct dma_regs *dma_reg = chan->device->dma_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
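	/* The FIFO entry is the CDB's physical address with control
	 * flags encoded in its low bits; DMA_CDB_NO_INT presumably
	 * suppresses the end-of-transfer interrupt for CDBs that did
	 * not request one.
	 */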
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) pcdb = desc->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (!test_bit(PPC440SPE_DESC_INT, &desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) pcdb |= DMA_CDB_NO_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) chan_last_sub[chan->device->id] = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) ADMA_LL_DBG(print_cb(chan, desc->hw_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) iowrite32(pcdb, &dma_reg->cpfpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * ppc440spe_chan_append - update the h/w chain in the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static void ppc440spe_chan_append(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct xor_regs *xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct ppc440spe_adma_desc_slot *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct xor_cb *xcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) u32 cur_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
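	/* IRQs are disabled here since the chain bookkeeping below
	 * (chan_last_sub[], xor_last_submit, xor_last_linked and
	 * do_xor_refetch) is shared with the interrupt path.
	 */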
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) cur_desc = ppc440spe_chan_get_current_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (likely(cur_desc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) iter = chan_last_sub[chan->device->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) BUG_ON(!iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* first peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) iter = chan_first_cdb[chan->device->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) BUG_ON(!iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ppc440spe_dma_put_desc(chan, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) chan->hw_chain_inited = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* is there something new to append */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (!iter->hw_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* flush descriptors from the s/w queue to fifo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) list_for_each_entry_continue(iter, &chan->chain, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) ppc440spe_dma_put_desc(chan, iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!iter->hw_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* update h/w links and refetch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (!xor_last_submit->hw_next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) xor_reg = chan->device->xor_reg;
		/* the last linked CDB has to generate an interrupt
		 * so that we are able to append the next lists to
		 * the h/w chain regardless of the XOR engine state
		 * at the moment these lists are appended
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) xcb = xor_last_linked->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) xcb->cbc |= XOR_CBCR_CBCE_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (!(ioread32be(&xor_reg->sr) & XOR_SR_XCP_BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* XORcore is idle. Refetch now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) do_xor_refetch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ppc440spe_xor_set_link(xor_last_submit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) xor_last_submit->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ADMA_LL_DBG(print_cb_list(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) xor_last_submit->hw_next));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) xor_last_submit = xor_last_linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) iowrite32be(ioread32be(&xor_reg->crsr) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) &xor_reg->crsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* XORcore is running. Refetch later in the handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) do_xor_refetch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * ppc440spe_chan_get_current_descriptor - get the currently executed descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ppc440spe_chan_get_current_descriptor(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct dma_regs *dma_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct xor_regs *xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (unlikely(!chan->hw_chain_inited))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) /* h/w descriptor chain is not initialized yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) dma_reg = chan->device->dma_reg;
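		/* ACPL holds the address of the CDB being processed;
		 * mask off the low status bits to get a clean
		 * descriptor address.
		 */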
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return ioread32(&dma_reg->acpl) & (~DMA_CDB_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) xor_reg = chan->device->xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return ioread32be(&xor_reg->ccbalr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * ppc440spe_chan_run - enable the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static void ppc440spe_chan_run(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct xor_regs *xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* DMAs are always enabled, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* drain write buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) xor_reg = chan->device->xor_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
		/* fetch the descriptor pointed to by <link> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) iowrite32be(XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) &xor_reg->crsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * ADMA device level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ******************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static dma_cookie_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) dma_addr_t addr, int index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ppc440spe_adma_memcpy_xor_set_src(struct ppc440spe_adma_desc_slot *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dma_addr_t addr, int index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) dma_addr_t *paddr, unsigned long flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dma_addr_t addr, int index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ppc440spe_adma_pq_set_src_mult(struct ppc440spe_adma_desc_slot *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) unsigned char mult, int index, int dst_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) ppc440spe_adma_pqzero_sum_set_dest(struct ppc440spe_adma_desc_slot *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) dma_addr_t paddr, dma_addr_t qaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) static struct page *ppc440spe_rxor_srcs[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * ppc440spe_can_rxor - check if the operands may be processed with RXOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static int ppc440spe_can_rxor(struct page **srcs, int src_cnt, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int i, order = 0, state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
	if (unlikely(src_cnt <= 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) BUG_ON(src_cnt > ARRAY_SIZE(ppc440spe_rxor_srcs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* Skip holes in the source list before checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) for (i = 0; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (!srcs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ppc440spe_rxor_srcs[idx++] = srcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) src_cnt = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) for (i = 1; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) char *cur_addr = page_address(ppc440spe_rxor_srcs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) char *old_addr = page_address(ppc440spe_rxor_srcs[i - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (cur_addr == old_addr + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* direct RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) order = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) } else if (old_addr == cur_addr + len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* reverse RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) order = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if ((i == src_cnt - 2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) (order == -1 && cur_addr != old_addr - len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) } else if ((cur_addr == old_addr + len * order) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) (cur_addr == old_addr + 2 * len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) (cur_addr == old_addr + 3 * len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) state = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (state == 1 || state == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /**
 * ppc440spe_adma_estimate - estimate how efficiently the given operation
 * would be processed on this channel. It's assumed that 'chan' is capable
 * of processing the 'cap' type of operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * @chan: channel to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * @cap: type of transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * @dst_lst: array of destination pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * @dst_cnt: number of destination operands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * @src_lst: array of source pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * @src_cnt: number of source operands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * @src_sz: size of each source operand
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static int ppc440spe_adma_estimate(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) enum dma_transaction_type cap, struct page **dst_lst, int dst_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) struct page **src_lst, int src_cnt, size_t src_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) int ef = 1;
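	/* The estimate is a small integer rank: -1 means the channel
	 * cannot process the operation at all, 0 means another engine
	 * is preferable, and larger values mean a better fit; an idle
	 * channel gets one extra point below.
	 */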
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (cap == DMA_PQ || cap == DMA_PQ_VAL) {
		/* If the RAID-6 capabilities were not activated,
		 * don't try to use them.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (unlikely(!ppc440spe_r6_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
	/* In the current implementation of the ppc440spe ADMA driver
	 * it makes sense to pick out only the PQ case, because it may
	 * be processed:
	 * (1) either using the Biskup method on DMA2;
	 * (2) or on DMA0/1.
	 * Thus we favour (1) if the sources are suitable; otherwise
	 * let the operation be processed on one of the DMA0/1 engines.
	 * In the sum_product case, where the destination is also a
	 * source, process it on DMA0/1 only.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (cap == DMA_PQ && chan->chan_id == PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (dst_cnt == 1 && src_cnt == 2 && dst_lst[0] == src_lst[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) ef = 0; /* sum_product case, process on DMA0/1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) else if (ppc440spe_can_rxor(src_lst, src_cnt, src_sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ef = 3; /* override (DMA0/1 + idle) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) ef = 0; /* can't process on DMA2 if !rxor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /* channel idleness increases the priority */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (likely(ef) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) !ppc440spe_chan_is_busy(to_ppc440spe_adma_chan(chan)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ef++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return ef;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct dma_chan *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct page **dst_lst, int dst_cnt, struct page **src_lst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) int src_cnt, size_t src_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct dma_chan *best_chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct ppc_dma_chan_ref *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) int best_rank = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (unlikely(!src_sz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) if (src_sz > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /*
		 * should a user of the API ever pass requests larger
		 * than PAGE_SIZE, sort out the cases where temporary
		 * page-sized buffers are used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) switch (cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) case DMA_PQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (src_cnt == 1 && dst_lst[1] == src_lst[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (src_cnt == 2 && dst_lst[1] == src_lst[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) case DMA_PQ_VAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) case DMA_XOR_VAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) list_for_each_entry(ref, &ppc440spe_adma_chan_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) int rank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) rank = ppc440spe_adma_estimate(ref->chan, cap, dst_lst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) dst_cnt, src_lst, src_cnt, src_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (rank > best_rank) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) best_rank = rank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) best_chan = ref->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return best_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) EXPORT_SYMBOL_GPL(ppc440spe_async_tx_find_best_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /**
 * ppc440spe_get_group_entry - get the group entry with index entry_idx
 * @tdesc: the last allocated slot in the group
 * @entry_idx: index of the entry to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static struct ppc440spe_adma_desc_slot *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ppc440spe_get_group_entry(struct ppc440spe_adma_desc_slot *tdesc, u32 entry_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
	struct ppc440spe_adma_desc_slot *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
	if (entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
		printk(KERN_ERR "%s: entry_idx %u, src_cnt %d, dst_cnt %d\n",
			__func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
		BUG();
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) list_for_each_entry(iter, &tdesc->group_list, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (i++ == entry_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * ppc440spe_adma_free_slots - flags descriptor slots for reuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * @slot: Slot to free
 * Caller must hold chan->lock while calling this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int stride = slot->slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
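	/* slots allocated to one operation are adjacent on the
	 * channel's slot list, so walk slots_per_op consecutive
	 * entries and mark each one free by zeroing slots_per_op
	 */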
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) while (stride--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) slot->slots_per_op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) slot = list_entry(slot->slot_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) slot_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /**
 * ppc440spe_adma_run_tx_complete_actions - invoke the completion callback
 * and run dependent operations for a finished descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct ppc440spe_adma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) dma_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) BUG_ON(desc->async_tx.cookie < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (desc->async_tx.cookie > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) cookie = desc->async_tx.cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) desc->async_tx.cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) dma_descriptor_unmap(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* call the callback (must not sleep or submit new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * operations to this channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /* run dependent operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) dma_run_dependencies(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * ppc440spe_adma_clean_slot - clean up CDB slot (if ack is set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static int ppc440spe_adma_clean_slot(struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* the client is allowed to attach dependent operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * until 'ack' is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (!async_tx_test_ack(&desc->async_tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /* leave the last descriptor in the chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * so we can append to it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (list_is_last(&desc->chain_node, &chan->chain) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) desc->phys == ppc440spe_chan_get_current_descriptor(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (chan->device->id != PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /* our DMA interrupt handler clears opc field of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * each processed descriptor. For all types of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * operations except for ZeroSum we do not actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * need ack from the interrupt handler. ZeroSum is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * special case since the result of this operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * is available from the handler only, so if we see
		 * such a descriptor (which has not been processed yet)
		 * then leave it in the chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct dma_cdb *cdb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (cdb->opc == DMA_CDB_OPC_DCHECK128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) desc->phys, desc->idx, desc->slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) list_del(&desc->chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) ppc440spe_adma_free_slots(desc, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /**
 * __ppc440spe_adma_slot_cleanup - the common clean-up routine which runs
 * through the channel's CDB list until it reaches the descriptor that is
 * currently being processed. When the routine determines that all the
 * CDBs of a group have completed, the corresponding callbacks (if any)
 * are invoked and the slots are freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) dma_cookie_t cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) u32 current_desc = ppc440spe_chan_get_current_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) int busy = ppc440spe_chan_is_busy(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) dev_dbg(chan->device->common.dev, "ppc440spe adma%d: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) chan->device->id, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (!current_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* There were no transactions yet, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * nothing to clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* free completed slots from the chain starting with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * the oldest descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) list_for_each_entry_safe(iter, _iter, &chan->chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) "busy: %d this_desc: %#llx next_desc: %#x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) "cur: %#x ack: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) iter->async_tx.cookie, iter->idx, busy, iter->phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) ppc440spe_desc_get_link(iter, chan), current_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) async_tx_test_ack(&iter->async_tx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) prefetch(_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) prefetch(&_iter->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
		/* do not advance past the current descriptor loaded
		 * into the hardware channel; subsequent descriptors
		 * are either in process or have not been submitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (seen_current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* stop the search if we reach the current descriptor and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * channel is busy, or if it appears that the current descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * needs to be re-read (i.e. has been appended to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (iter->phys == current_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) BUG_ON(seen_current++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (busy || ppc440spe_desc_get_link(iter, chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* not all descriptors of the group have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * been completed; exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) /* detect the start of a group transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (!slot_cnt && !slots_per_op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) slot_cnt = iter->slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) slots_per_op = iter->slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (slot_cnt <= slots_per_op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) slot_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) slots_per_op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (slot_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (!group_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) group_start = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) slot_cnt -= slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* all the members of a group are complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (slots_per_op != 0 && slot_cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct ppc440spe_adma_desc_slot *grp_iter, *_grp_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) int end_of_chain = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* clean up the group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) slot_cnt = group_start->slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) grp_iter = group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) list_for_each_entry_safe_from(grp_iter, _grp_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) &chan->chain, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) cookie = ppc440spe_adma_run_tx_complete_actions(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) grp_iter, chan, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) slot_cnt -= slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) end_of_chain = ppc440spe_adma_clean_slot(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) grp_iter, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if (end_of_chain && slot_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /* Should wait for ZeroSum completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (cookie > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) chan->common.completed_cookie = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (slot_cnt == 0 || end_of_chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* the group should be complete at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) BUG_ON(slot_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) slots_per_op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) group_start = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (end_of_chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) } else if (slots_per_op) /* wait for group completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (ppc440spe_adma_clean_slot(iter, chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) BUG_ON(!seen_current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (cookie > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) chan->common.completed_cookie = cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) pr_debug("\tcompleted cookie %d\n", cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * ppc440spe_adma_tasklet - clean up watch-dog initiator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static void ppc440spe_adma_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct ppc440spe_adma_chan *chan = from_tasklet(chan, t, irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
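/* the cleanup may nest under another lock of the same class
* (presumably another channel's lock), hence the
* SINGLE_DEPTH_NESTING annotation for lockdep
*/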
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) spin_lock_nested(&chan->lock, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) __ppc440spe_adma_slot_cleanup(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) spin_unlock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * ppc440spe_adma_slot_cleanup - clean up scheduled initiator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static void ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) __ppc440spe_adma_slot_cleanup(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * ppc440spe_adma_alloc_slots - allocate free slots (if any)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static struct ppc440spe_adma_desc_slot *ppc440spe_adma_alloc_slots(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct ppc440spe_adma_chan *chan, int num_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) int slots_per_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) struct ppc440spe_adma_desc_slot *iter = NULL, *_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct ppc440spe_adma_desc_slot *alloc_start = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct list_head chain = LIST_HEAD_INIT(chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) int slots_found, retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) BUG_ON(!num_slots || !slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) /* start search from the last allocated descrtiptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * if a contiguous allocation can not be found start searching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * from the beginning of the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) slots_found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (retry == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) iter = chan->last_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) iter = list_entry(&chan->all_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) slot_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) slot_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) prefetch(_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) prefetch(&_iter->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (iter->slots_per_op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) slots_found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /* start the allocation if the slot is correctly aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (!slots_found++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) alloc_start = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (slots_found == num_slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) struct ppc440spe_adma_desc_slot *alloc_tail = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) struct ppc440spe_adma_desc_slot *last_used = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) iter = alloc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) while (num_slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) /* pre-ack all but the last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (num_slots != slots_per_op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) async_tx_ack(&iter->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) list_add_tail(&iter->chain_node, &chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) alloc_tail = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) iter->async_tx.cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) iter->hw_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) iter->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) iter->slot_cnt = num_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) iter->xor_check_result = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) for (i = 0; i < slots_per_op; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) iter->slots_per_op = slots_per_op - i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) last_used = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) iter = list_entry(iter->slot_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) slot_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) num_slots -= slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) alloc_tail->group_head = alloc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) alloc_tail->async_tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) list_splice(&chain, &alloc_tail->group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) chan->last_used = last_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return alloc_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (!retry++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /* try to free some slots if the allocation fails */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) tasklet_schedule(&chan->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * ppc440spe_adma_alloc_chan_resources - allocate pools for CDB slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static int ppc440spe_adma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct ppc440spe_adma_desc_slot *slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) char *hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) int i, db_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) init = ppc440spe_chan->slots_allocated ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) chan->chan_id = ppc440spe_chan->device->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /* Allocate descriptor slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) i = ppc440spe_chan->slots_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (ppc440spe_chan->device->id != PPC440SPE_XOR_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) db_sz = sizeof(struct dma_cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) db_sz = sizeof(struct xor_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) for (; i < (ppc440spe_chan->device->pool_size / db_sz); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) slot = kzalloc(sizeof(struct ppc440spe_adma_desc_slot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (!slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) printk(KERN_INFO "SPE ADMA Channel only initialized"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) " %d descriptor slots", i--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) hw_desc = (char *) ppc440spe_chan->device->dma_desc_pool_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) slot->hw_desc = (void *) &hw_desc[i * db_sz];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) dma_async_tx_descriptor_init(&slot->async_tx, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) slot->async_tx.tx_submit = ppc440spe_adma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) INIT_LIST_HEAD(&slot->chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) INIT_LIST_HEAD(&slot->slot_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) INIT_LIST_HEAD(&slot->group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) slot->phys = ppc440spe_chan->device->dma_desc_pool + i * db_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) slot->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) ppc440spe_chan->slots_allocated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) list_add_tail(&slot->slot_node, &ppc440spe_chan->all_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (i && !ppc440spe_chan->last_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) ppc440spe_chan->last_used =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) list_entry(ppc440spe_chan->all_slots.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) slot_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) "ppc440spe adma%d: allocated %d descriptor slots\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) ppc440spe_chan->device->id, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /* initialize the channel and the chain with a null operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) switch (ppc440spe_chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) ppc440spe_chan->hw_chain_inited = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) /* Use WXOR for self-testing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) if (!ppc440spe_r6_tchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) ppc440spe_r6_tchan = ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) ppc440spe_chan_start_null_xor(ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) ppc440spe_chan->needs_unmap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return (i > 0) ? i : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * ppc440spe_rxor_set_region_data -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) static void ppc440spe_rxor_set_region(struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) u8 xor_arg_no, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) struct xor_cb *xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) xcb->ops[xor_arg_no].h |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * ppc440spe_rxor_set_src -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) static void ppc440spe_rxor_set_src(struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) u8 xor_arg_no, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct xor_cb *xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) xcb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) xcb->ops[xor_arg_no].l = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * ppc440spe_rxor_set_mult -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) static void ppc440spe_rxor_set_mult(struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) u8 xor_arg_no, u8 idx, u8 mult)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) struct xor_cb *xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) xcb->ops[xor_arg_no].h |= mult << (DMA_CUED_MULT1_OFF + idx * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * ppc440spe_adma_check_threshold - append CDBs to h/w chain if threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * has been achieved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) static void ppc440spe_adma_check_threshold(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) dev_dbg(chan->device->common.dev, "ppc440spe adma%d: pending: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) chan->device->id, chan->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (chan->pending >= PPC440SPE_ADMA_THRESHOLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) chan->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) ppc440spe_chan_append(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * ppc440spe_adma_tx_submit - submit new descriptor group to the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * (it's not necessary that descriptors will be submitted to the h/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * chains too right now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static dma_cookie_t ppc440spe_adma_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct ppc440spe_adma_desc_slot *sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct ppc440spe_adma_chan *chan = to_ppc440spe_adma_chan(tx->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) struct ppc440spe_adma_desc_slot *group_start, *old_chain_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) int slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) int slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) sw_desc = tx_to_ppc440spe_adma_slot(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) group_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) slot_cnt = group_start->slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) slots_per_op = group_start->slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (unlikely(list_empty(&chan->chain))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) /* first peer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) list_splice_init(&sw_desc->group_list, &chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) chan_first_cdb[chan->device->id] = group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) /* isn't first peer, bind CDBs to chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) old_chain_tail = list_entry(chan->chain.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) list_splice_init(&sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) &old_chain_tail->chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /* fix up the hardware chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) ppc440spe_desc_set_link(chan, old_chain_tail, group_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) /* increment the pending count by the number of operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) chan->pending += slot_cnt / slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ppc440spe_adma_check_threshold(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) dev_dbg(chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) "ppc440spe adma%d: %s cookie: %d slot: %d tx %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) chan->device->id, __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * ppc440spe_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_interrupt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct dma_chan *chan, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) int slot_cnt, slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) "ppc440spe adma%d: %s\n", ppc440spe_chan->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) slot_cnt = slots_per_op = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) group_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) ppc440spe_desc_init_interrupt(group_start, ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) group_start->unmap_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * ppc440spe_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_memcpy(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) struct dma_chan *chan, dma_addr_t dma_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) dma_addr_t dma_src, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) int slot_cnt, slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (unlikely(!len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) BUG_ON(len > PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) "ppc440spe adma%d: %s len: %u int_en %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ppc440spe_chan->device->id, __func__, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) flags & DMA_PREP_INTERRUPT ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) slot_cnt = slots_per_op = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) group_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) ppc440spe_desc_init_memcpy(group_start, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ppc440spe_adma_set_dest(group_start, dma_dest, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) ppc440spe_adma_memcpy_xor_set_src(group_start, dma_src, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) sw_desc->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) * ppc440spe_adma_prep_dma_xor - prepare CDB for a XOR operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct dma_chan *chan, dma_addr_t dma_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) dma_addr_t *dma_src, u32 src_cnt, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) int slot_cnt, slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) ADMA_LL_DBG(prep_dma_xor_dbg(ppc440spe_chan->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) dma_dest, dma_src, src_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (unlikely(!len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) ppc440spe_chan->device->id, __func__, src_cnt, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) flags & DMA_PREP_INTERRUPT ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) slot_cnt = ppc440spe_chan_xor_slot_count(len, src_cnt, &slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) group_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) ppc440spe_desc_init_xor(group_start, src_cnt, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) ppc440spe_adma_set_dest(group_start, dma_dest, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) while (src_cnt--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) ppc440spe_adma_memcpy_xor_set_src(group_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) dma_src[src_cnt], src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) ppc440spe_desc_set_byte_count(group_start, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) sw_desc->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) ppc440spe_desc_set_xor_src_cnt(struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) int src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * ppc440spe_adma_init_dma2rxor_slot -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static void ppc440spe_adma_init_dma2rxor_slot(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) dma_addr_t *src, int src_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) /* initialize CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) for (i = 0; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) ppc440spe_adma_dma2rxor_prep_src(desc, &desc->rxor_cursor, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) desc->src_cnt, (u32)src[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * ppc440spe_dma01_prep_mult -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * for Q operation where destination is also the source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_mult(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) struct ppc440spe_adma_chan *ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) const unsigned char *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct ppc440spe_adma_desc_slot *sw_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) unsigned long op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) int slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) set_bit(PPC440SPE_DESC_WXOR, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) slot_cnt = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /* use WXOR, each descriptor occupies one slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct ppc440spe_adma_desc_slot *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) struct dma_cdb *hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) set_bits(op, &sw_desc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) sw_desc->src_cnt = src_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) sw_desc->dst_cnt = dst_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* First descriptor, zero data in the destination and copy it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) * to q page using MULTICAST transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) iter = list_first_entry(&sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /* set 'next' pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) iter->hw_next = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) clear_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) hw_desc->opc = DMA_CDB_OPC_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) DMA_CUED_XOR_BASE, dst[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) src[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * Second descriptor, multiply data from the q page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * and store the result in real destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) iter = list_first_entry(&iter->chain_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) iter->hw_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) set_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) clear_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) ppc440spe_desc_set_src_addr(iter, chan, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) DMA_CUED_XOR_HB, dst[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) DMA_CUED_XOR_BASE, dst[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) DMA_CDB_SG_DST1, scf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) return sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) * ppc440spe_dma01_prep_sum_product -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) * Dx = A*(P+Pxy) + B*(Q+Qxy) operation where destination is also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * the source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_sum_product(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) struct ppc440spe_adma_chan *ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) dma_addr_t *dst, dma_addr_t *src, int src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) const unsigned char *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct ppc440spe_adma_desc_slot *sw_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) unsigned long op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) int slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) set_bit(PPC440SPE_DESC_WXOR, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) slot_cnt = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) /* WXOR, each descriptor occupies one slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) struct ppc440spe_adma_desc_slot *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) struct dma_cdb *hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) set_bits(op, &sw_desc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) sw_desc->src_cnt = src_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) sw_desc->dst_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) /* 1st descriptor, src[1] data to q page and zero destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) iter = list_first_entry(&sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) iter->hw_next = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) clear_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) hw_desc->opc = DMA_CDB_OPC_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) *dst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ppc440spe_desc_set_dest_addr(iter, chan, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) ppc440spe_chan->qdest, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) src[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /* 2nd descriptor, multiply src[1] data and store the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * result in destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) iter = list_first_entry(&iter->chain_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /* set 'next' pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) iter->hw_next = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) set_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) clear_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) ppc440spe_chan->qdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) *dst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) DMA_CDB_SG_DST1, scf[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * 3rd descriptor, multiply src[0] data and xor it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * with destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) iter = list_first_entry(&iter->chain_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) iter->hw_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) set_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) clear_bit(PPC440SPE_DESC_INT, &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) src[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) *dst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) DMA_CDB_SG_DST1, scf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) static struct ppc440spe_adma_desc_slot *ppc440spe_dma01_prep_pq(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) struct ppc440spe_adma_chan *ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) const unsigned char *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) int slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) unsigned long op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) unsigned char mult = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) __func__, dst_cnt, src_cnt, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) /* select operations WXOR/RXOR depending on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) * source addresses of operators and the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * of destinations (RXOR support only Q-parity calculations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) set_bit(PPC440SPE_DESC_WXOR, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (!test_and_set_bit(PPC440SPE_RXOR_RUN, &ppc440spe_rxor_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /* no active RXOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * do RXOR if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * - there are more than 1 source,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * - len is aligned on 512-byte boundary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) * - source addresses fit to one of 4 possible regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (src_cnt > 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) !(len & MQ0_CF2H_RXOR_BS_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) (src[0] + len) == src[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) /* may do RXOR R1 R2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) set_bit(PPC440SPE_DESC_RXOR, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (src_cnt != 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) /* may try to enhance region of RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if ((src[1] + len) == src[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) /* do RXOR R1 R2 R3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) set_bit(PPC440SPE_DESC_RXOR123,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) } else if ((src[1] + len * 2) == src[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) /* do RXOR R1 R2 R4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) set_bit(PPC440SPE_DESC_RXOR124, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) } else if ((src[1] + len * 3) == src[2]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) /* do RXOR R1 R2 R5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) set_bit(PPC440SPE_DESC_RXOR125,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) /* do RXOR R1 R2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) set_bit(PPC440SPE_DESC_RXOR12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) /* do RXOR R1 R2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) set_bit(PPC440SPE_DESC_RXOR12, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /* can not do this operation with RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) clear_bit(PPC440SPE_RXOR_RUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) &ppc440spe_rxor_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) /* can do; set block size right now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) ppc440spe_desc_set_rxor_block_size(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) /* Number of necessary slots depends on operation type selected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (!test_bit(PPC440SPE_DESC_RXOR, &op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) /* This is a WXOR only chain. Need descriptors for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * source to GF-XOR them with WXOR, and need descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * for each destination to zero them with WXOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) slot_cnt = src_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (flags & DMA_PREP_ZERO_P) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) slot_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) set_bit(PPC440SPE_ZERO_P, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (flags & DMA_PREP_ZERO_Q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) slot_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) set_bit(PPC440SPE_ZERO_Q, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) /* Need 1/2 descriptor for RXOR operation, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * need (src_cnt - (2 or 3)) for WXOR of sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * remained (if any)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) slot_cnt = dst_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (flags & DMA_PREP_ZERO_P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) set_bit(PPC440SPE_ZERO_P, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (flags & DMA_PREP_ZERO_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) set_bit(PPC440SPE_ZERO_Q, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
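/* the RXOR R1 R2 primitive consumes two sources, the
* three-region variants consume three; each remaining source
* takes one WXOR slot
*/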
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (test_bit(PPC440SPE_DESC_RXOR12, &op))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) slot_cnt += src_cnt - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) slot_cnt += src_cnt - 3;
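^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /* e.g. six sources with an RXOR12 head: two sources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * are consumed by the RXOR descriptor and the other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * four are WXORed, so slot_cnt = dst_cnt + 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) */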
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) /* Thus we have either an RXOR-only chain or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * a mixed RXOR/WXOR chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) if (slot_cnt == dst_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) /* RXOR only chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) clear_bit(PPC440SPE_DESC_WXOR, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) /* for both RXOR/WXOR each descriptor occupies one slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) ppc440spe_desc_init_dma01pq(sw_desc, dst_cnt, src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) flags, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /* setup dst/src/mult */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) pr_debug("%s: set dst descriptor 0, 1: 0x%016llx, 0x%016llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) __func__, dst[0], dst[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) while (src_cnt--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /* NOTE: "Multi = 0 is equivalent to = 1", as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * stated in 440SPSPe_RAID6_Addendum_UM_1_17.pdf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * does not hold for RXOR with DMA0/1! Instead, mult=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) * leads to zeroing the source data after RXOR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * So, for the P-only case set mult=1 explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) if (!(flags & DMA_PREP_PQ_DISABLE_Q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) mult = scf[src_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) ppc440spe_adma_pq_set_src_mult(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) mult, src_cnt, dst_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) /* Setup the byte count for each slot just allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) list_for_each_entry(iter, &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) ppc440spe_desc_set_byte_count(iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) ppc440spe_chan, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) return sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) static struct ppc440spe_adma_desc_slot *ppc440spe_dma2_prep_pq(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct ppc440spe_adma_chan *ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) dma_addr_t *dst, int dst_cnt, dma_addr_t *src, int src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) const unsigned char *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) int slot_cnt, descs_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) unsigned long op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) unsigned char mult = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) BUG_ON(!dst_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /*pr_debug("%s: dst_cnt %d, src_cnt %d, len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) __func__, dst_cnt, src_cnt, len);*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) descs_per_op = ppc440spe_dma2_pq_slot_count(src, src_cnt, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) if (descs_per_op < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) /* one RXOR chain per destination: 1 or 2 chains depending on dst_cnt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) slot_cnt = descs_per_op * dst_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) op = slot_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) sw_desc->async_tx.flags = flags;
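^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) /* op counts the slots down so that only the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * descriptor in the chain gets the caller's flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * (e.g. DMA_PREP_INTERRUPT); intermediate descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * are initialized with flags == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) */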
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) --op ? 0 : flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) ppc440spe_init_rxor_cursor(&(iter->rxor_cursor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) iter->rxor_cursor.len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) iter->descs_per_op = descs_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) op++;
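^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) /* the RXOR source list is set up once per group of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * descs_per_op descriptors, i.e. on the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * descriptor of each per-destination chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) */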
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (op % descs_per_op == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) ppc440spe_adma_init_dma2rxor_slot(iter, src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (likely(!list_is_last(&iter->chain_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) &sw_desc->group_list))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) /* set 'next' pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) iter->hw_next =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) ppc440spe_xor_set_link(iter, iter->hw_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /* this is the last descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) iter->hw_next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) /* fixup head descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) sw_desc->dst_cnt = dst_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (flags & DMA_PREP_ZERO_P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) set_bit(PPC440SPE_ZERO_P, &sw_desc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (flags & DMA_PREP_ZERO_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) set_bit(PPC440SPE_ZERO_Q, &sw_desc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) /* setup dst/src/mult */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) ppc440spe_adma_pq_set_dest(sw_desc, dst, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) while (src_cnt--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) /* descriptors of the second chain (if dst_cnt == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) * are handled inside the ppc440spe_adma_pq_set_srcxxx()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) * functions themselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) ppc440spe_adma_pq_set_src(sw_desc, src[src_cnt],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (!(flags & DMA_PREP_PQ_DISABLE_Q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) mult = scf[src_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) ppc440spe_adma_pq_set_src_mult(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) mult, src_cnt, dst_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) ppc440spe_desc_set_rxor_block_size(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) return sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) * ppc440spe_adma_prep_dma_pq - prepare CDB (group) for a GF-XOR operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pq(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) unsigned int src_cnt, const unsigned char *scf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) struct ppc440spe_adma_desc_slot *sw_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) int dst_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) ADMA_LL_DBG(prep_dma_pq_dbg(ppc440spe_chan->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) dst, src, src_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) BUG_ON(!len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) BUG_ON(len > PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) BUG_ON(!src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
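^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) /* A single source that also appears as the Q destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * is an in-place multiply of one page by its coefficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * (presumably issued by the RAID-6 recovery path); route
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * it to the dedicated multiply preparer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) */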
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (src_cnt == 1 && dst[1] == src[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) dma_addr_t dest[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /* dst[1] is real destination (Q) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) dest[0] = dst[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) /* this is the page to multicast source data to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) dest[1] = ppc440spe_chan->qdest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) sw_desc = ppc440spe_dma01_prep_mult(ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) dest, 2, src, src_cnt, scf, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
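^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) /* Two sources where the second is also the Q destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * request an in-place sum of products,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * Q = scf[0]*src[0] + scf[1]*src[1] over GF(2^8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * (presumably the RAID-6 double-data recovery primitive).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) */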
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (src_cnt == 2 && dst[1] == src[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) sw_desc = ppc440spe_dma01_prep_sum_product(ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) &dst[1], src, 2, scf, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) if (!(flags & DMA_PREP_PQ_DISABLE_P)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) BUG_ON(!dst[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) dst_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) flags |= DMA_PREP_ZERO_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (!(flags & DMA_PREP_PQ_DISABLE_Q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) BUG_ON(!dst[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) dst_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) flags |= DMA_PREP_ZERO_Q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) BUG_ON(!dst_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) "ppc440spe adma%d: %s src_cnt: %d len: %u int_en: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ppc440spe_chan->device->id, __func__, src_cnt, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) flags & DMA_PREP_INTERRUPT ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) switch (ppc440spe_chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) sw_desc = ppc440spe_dma01_prep_pq(ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) dst, dst_cnt, src, src_cnt, scf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) sw_desc = ppc440spe_dma2_prep_pq(ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) dst, dst_cnt, src, src_cnt, scf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) * ppc440spe_adma_prep_dma_pqzero_sum - prepare CDB group for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) * a PQ_ZERO_SUM operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_pqzero_sum(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) unsigned int src_cnt, const unsigned char *scf, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) enum sum_check_flags *pqres, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) struct ppc440spe_adma_desc_slot *sw_desc, *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) dma_addr_t pdest, qdest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) int slot_cnt, slots_per_op, idst, dst_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) if (flags & DMA_PREP_PQ_DISABLE_P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) pdest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) pdest = pq[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (flags & DMA_PREP_PQ_DISABLE_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) qdest = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) qdest = pq[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) ADMA_LL_DBG(prep_dma_pqzero_sum_dbg(ppc440spe_chan->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) src, src_cnt, scf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) /* Always use WXOR for P/Q calculations (two destinations).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * Need 1 or 2 extra slots to verify results are zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) idst = dst_cnt = (pdest && qdest) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) /* One additional slot per destination to clone P/Q
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) * before calculation (we have to preserve destinations).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) slot_cnt = src_cnt + dst_cnt * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) slots_per_op = 1;
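^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /* e.g. checking both P and Q over 4 sources takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) * 4 WXOR slots + 2 clone slots + 2 DCHECK slots = 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) */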
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) sw_desc = ppc440spe_adma_alloc_slots(ppc440spe_chan, slot_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) ppc440spe_desc_init_dma01pqzero_sum(sw_desc, dst_cnt, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) /* Setup byte count for each slot just allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) sw_desc->async_tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) iter->unmap_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (pdest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) struct dma_cdb *hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
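^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* Build a plain copy CDB (MV_SG1_SG2) that clones the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * caller's P into the channel's scratch page; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * zero-sum math below then runs on the clone, leaving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * the caller's P untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) */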
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) iter = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) iter->hw_next = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) iter->src_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) iter->dst_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) ppc440spe_desc_set_dest_addr(iter, chan, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) ppc440spe_chan->pdest, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) iter->unmap_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) /* override pdest to preserve original P */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) pdest = ppc440spe_chan->pdest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (qdest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) struct dma_cdb *hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) iter = list_first_entry(&sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (pdest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) iter = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) memset(iter->hw_desc, 0, sizeof(struct dma_cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) iter->hw_next = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) hw_desc = iter->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) iter->src_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) iter->dst_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) ppc440spe_desc_set_dest_addr(iter, chan, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) ppc440spe_chan->qdest, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) ppc440spe_desc_set_byte_count(iter, ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) iter->unmap_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) /* override qdest to preserve original Q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) qdest = ppc440spe_chan->qdest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) /* Setup destinations for P/Q ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) ppc440spe_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) /* Setup zero QWORDs into DCHECK CDBs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) idst = dst_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) list_for_each_entry_reverse(iter, &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * The last CDB corresponds to the Q-parity check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * the one before the last CDB corresponds to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * the P-parity check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) if (idst == DMA_DEST_MAX_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (idst == dst_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) set_bit(PPC440SPE_DESC_QCHECK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) set_bit(PPC440SPE_DESC_PCHECK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (qdest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) set_bit(PPC440SPE_DESC_QCHECK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) set_bit(PPC440SPE_DESC_PCHECK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) &iter->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) iter->xor_check_result = pqres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * set it to zero; if the check fails, the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) * will be updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) *iter->xor_check_result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) ppc440spe_desc_set_dcheck(iter, ppc440spe_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) ppc440spe_qword);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (!(--dst_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) /* Setup sources and mults for P/Q ops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) list_for_each_entry_continue_reverse(iter, &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) u32 mult_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) chan = to_ppc440spe_adma_chan(iter->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) ppc440spe_desc_set_src_addr(iter, chan, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) DMA_CUED_XOR_HB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) src[src_cnt - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if (qdest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) mult_dst = (dst_cnt - 1) ? DMA_CDB_SG_DST2 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) DMA_CDB_SG_DST1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) ppc440spe_desc_set_src_mult(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) DMA_CUED_MULT1_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) mult_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) scf[src_cnt - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (!(--src_cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return sw_desc ? &sw_desc->async_tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) * ppc440spe_adma_prep_dma_xor_zero_sum - prepare CDB group for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) * XOR ZERO_SUM operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) static struct dma_async_tx_descriptor *ppc440spe_adma_prep_dma_xor_zero_sum(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) size_t len, enum sum_check_flags *result, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) dma_addr_t pq[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) /* validate P, disable Q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) pq[0] = src[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) pq[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) flags |= DMA_PREP_PQ_DISABLE_Q;
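^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) /* i.e. an XOR zero-sum check is expressed as a P-only PQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * zero-sum: src[0] plays the role of P and the remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * sources are XORed onto its clone, which is then checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * for all-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) */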
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) src_cnt - 1, 0, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) result, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) return tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) * ppc440spe_adma_set_dest - set destination address into descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) static void ppc440spe_adma_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) dma_addr_t addr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) BUG_ON(index >= sw_desc->dst_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* to do: support transfer lengths >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * PPC440SPE_ADMA_DMA/XOR_MAX_BYTE_COUNT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) ppc440spe_desc_set_dest_addr(sw_desc->group_head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) chan, 0, addr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) sw_desc = ppc440spe_get_group_entry(sw_desc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) ppc440spe_desc_set_dest_addr(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) chan, 0, addr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) struct ppc440spe_adma_chan *chan, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /* To clear a destination, update the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) * (P or Q depending on the index) as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) * addr is the destination (0 corresponds to SG2):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) /* ... and the addr is source: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) /* addr is always SG2, so the mult is always DST1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) DMA_CDB_SG_DST1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) * ppc440spe_adma_pq_set_dest - set destination address into descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) * for the PQXOR operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) static void ppc440spe_adma_pq_set_dest(struct ppc440spe_adma_desc_slot *sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) dma_addr_t *addrs, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) struct ppc440spe_adma_desc_slot *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) dma_addr_t paddr, qaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) dma_addr_t addr = 0, ppath, qpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) int index = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (flags & DMA_PREP_PQ_DISABLE_P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) paddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) paddr = addrs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if (flags & DMA_PREP_PQ_DISABLE_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) qaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) qaddr = addrs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (!paddr || !qaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) addr = paddr ? paddr : qaddr;
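^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) /* a non-zero addr means only one destination is active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) * (P-only or Q-only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) */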
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) /* walk through the WXOR source list and set P/Q-destinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * for each slot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (!test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) /* This is a WXOR-only chain; it may have 1 or 2 zeroing descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) index++;
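^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) /* index now points past the zeroing descriptors to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) * the first data WXOR slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) */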
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) iter = ppc440spe_get_group_entry(sw_desc, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) /* one destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) list_for_each_entry_from(iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) &sw_desc->group_list, chain_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) DMA_CUED_XOR_BASE, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) /* two destinations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) list_for_each_entry_from(iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) &sw_desc->group_list, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) DMA_CUED_XOR_BASE, paddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) DMA_CUED_XOR_BASE, qaddr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) /* To clear the destinations, update the descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) * (1st, 2nd, or both depending on flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) if (test_bit(PPC440SPE_ZERO_P,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) &sw_desc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) iter = ppc440spe_get_group_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) sw_desc, index++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) ppc440spe_adma_pq_zero_op(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (test_bit(PPC440SPE_ZERO_Q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) &sw_desc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) iter = ppc440spe_get_group_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) sw_desc, index++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) ppc440spe_adma_pq_zero_op(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) qaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) /* This is RXOR-only or RXOR/WXOR mixed chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) /* If we want to include a destination in the calculation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) * make its address cued with mult=1 (XOR).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) DMA_CUED_XOR_HB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) DMA_CUED_XOR_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) (1 << DMA_CUED_MULT1_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) DMA_CUED_XOR_HB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) DMA_CUED_XOR_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) (1 << DMA_CUED_MULT1_OFF);
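^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) /* i.e. a destination that is being zeroed is written via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * the HB path, while a preserved destination is cued with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * mult=1 so that its old contents are XORed into the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) */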
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) /* Setup destination(s) in RXOR slot(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) iter = ppc440spe_get_group_entry(sw_desc, index++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) paddr ? ppath : qpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) paddr ? paddr : qaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) /* two destinations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) index++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) qpath, qaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) if (test_bit(PPC440SPE_DESC_WXOR, &sw_desc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) /* Setup destination(s) in remaining WXOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) * slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) /* one destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) list_for_each_entry_from(iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) chain_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) ppc440spe_desc_set_dest_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) DMA_CUED_XOR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) /* two destinations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) list_for_each_entry_from(iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) ppc440spe_desc_set_dest_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) DMA_CUED_XOR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) paddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) ppc440spe_desc_set_dest_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) DMA_CUED_XOR_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) qaddr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) /* DMA2 descriptors have only 1 destination, so there are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) * two chains - one for each dest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) * If we want to include a destination in the calculation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) * make its address cued with mult=1 (XOR).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) ppath = test_bit(PPC440SPE_ZERO_P, &sw_desc->flags) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) DMA_CUED_XOR_HB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) DMA_CUED_XOR_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) (1 << DMA_CUED_MULT1_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) qpath = test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) DMA_CUED_XOR_HB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) DMA_CUED_XOR_BASE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) (1 << DMA_CUED_MULT1_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) iter = ppc440spe_get_group_entry(sw_desc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) for (i = 0; i < sw_desc->descs_per_op; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) paddr ? ppath : qpath,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) paddr ? paddr : qaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) iter = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* Two destinations; setup Q here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) sw_desc->descs_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) for (i = 0; i < sw_desc->descs_per_op; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) ppc440spe_desc_set_dest_addr(iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) chan, qpath, qaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) iter = list_entry(iter->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) * ppc440spe_adma_pqzero_sum_set_dest - set destination address into descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) * for the PQ_ZERO_SUM operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static void ppc440spe_adma_pqzero_sum_set_dest(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) struct ppc440spe_adma_desc_slot *sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) dma_addr_t paddr, dma_addr_t qaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) struct ppc440spe_adma_desc_slot *iter, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) dma_addr_t addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) /* walk through the WXOR source list and set P/Q-destinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) * for each slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) idx = (paddr && qaddr) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) /* set end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) list_for_each_entry_reverse(end, &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (!(--idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) /* set start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) idx = (paddr && qaddr) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) iter = ppc440spe_get_group_entry(sw_desc, idx);
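^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) /* the first idx slots are the clone CDBs built by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) * caller, and 'end' found above is the first DCHECK CDB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) * [iter, end) spans exactly the data WXOR slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) */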
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (paddr && qaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) /* two destinations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) list_for_each_entry_from(iter, &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) if (unlikely(iter == end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) DMA_CUED_XOR_BASE, paddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) DMA_CUED_XOR_BASE, qaddr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) /* one destination */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) addr = paddr ? paddr : qaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) list_for_each_entry_from(iter, &sw_desc->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) if (unlikely(iter == end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) ppc440spe_desc_set_dest_addr(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) DMA_CUED_XOR_BASE, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) /* The remaining descriptors are DATACHECK. They need no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) * destination: the destination addresses are actually used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) * as sources for the check operation. So, set addr as a source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) ppc440spe_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) end = list_entry(end->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) struct ppc440spe_adma_desc_slot, chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) ppc440spe_desc_set_src_addr(end, chan, 0, 0, qaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) * ppc440spe_desc_set_xor_src_cnt - set source count into descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) static inline void ppc440spe_desc_set_xor_src_cnt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) int src_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) struct xor_cb *hw_desc = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
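^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) /* the number of source operands lives in the OAC field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) * of the XOR control word; mask it out, then OR it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) */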
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) hw_desc->cbc |= src_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) * ppc440spe_adma_pq_set_src - set source address into descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) static void ppc440spe_adma_pq_set_src(struct ppc440spe_adma_desc_slot *sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) dma_addr_t addr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) dma_addr_t haddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) struct ppc440spe_adma_desc_slot *iter = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) /* DMA0/1 may do: WXOR, RXOR, or a mixed RXOR+WXOR chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) /* RXOR-only or RXOR/WXOR operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) int iskip = test_bit(PPC440SPE_DESC_RXOR12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) &sw_desc->flags) ? 2 : 3;
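^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) /* the head RXOR descriptor consumes the first two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * sources for RXOR12 and the first three for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) * RXOR123/124/125
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) */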
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) /* 1st slot (RXOR) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) /* setup sources region (R1-2-3, R1-2-4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) * or R1-2-5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (test_bit(PPC440SPE_DESC_RXOR12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) haddr = DMA_RXOR12 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) DMA_CUED_REGION_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) else if (test_bit(PPC440SPE_DESC_RXOR123,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) haddr = DMA_RXOR123 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) DMA_CUED_REGION_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) else if (test_bit(PPC440SPE_DESC_RXOR124,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) haddr = DMA_RXOR124 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) DMA_CUED_REGION_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) else if (test_bit(PPC440SPE_DESC_RXOR125,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) haddr = DMA_RXOR125 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) DMA_CUED_REGION_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) haddr |= DMA_CUED_XOR_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) iter = ppc440spe_get_group_entry(sw_desc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) } else if (index < iskip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) /* the 1st (RXOR) slot covers the first <iskip>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) * sources, but its source address is set only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) * once (at index 0), so skip these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) iter = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) /* 2nd/3rd and subsequent slots (WXOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) * skip the first slot, which holds the RXOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) haddr = DMA_CUED_XOR_HB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) index - iskip + sw_desc->dst_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) int znum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) /* WXOR-only operation; skip the first slots, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) * zero the destinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) znum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) znum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) haddr = DMA_CUED_XOR_HB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) index + znum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (likely(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (!index &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) sw_desc->dst_cnt == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) /* if we have two destinations for RXOR, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) * set up the source in the second descriptor too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) iter = ppc440spe_get_group_entry(sw_desc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) ppc440spe_desc_set_src_addr(iter, chan, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) haddr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) /* DMA2 may do Biskup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) iter = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (iter->dst_cnt == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) /* both P & Q calculations required; set P src here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) /* this is for Q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) sw_desc->descs_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) ppc440spe_adma_dma2rxor_set_src(iter, index, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) * ppc440spe_adma_memcpy_xor_set_src - set source address into descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) static void ppc440spe_adma_memcpy_xor_set_src(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) struct ppc440spe_adma_desc_slot *sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) dma_addr_t addr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) sw_desc = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) if (likely(sw_desc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) ppc440spe_desc_set_src_addr(sw_desc, chan, index, 0, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) * ppc440spe_adma_dma2rxor_inc_addr - advance the RXOR cursor to the next operand; close the current CDB once XOR_MAX_OPS operands are set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) static void ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) struct ppc440spe_rxor *cursor, int index, int src_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) cursor->addr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) if (index == src_cnt - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) } else if (cursor->addr_count == XOR_MAX_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) ppc440spe_desc_set_xor_src_cnt(desc, cursor->addr_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) cursor->addr_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) cursor->desc_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) }
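/*
 * Illustrative walk-through (not part of the driver): each call to this
 * helper accounts for one completed RXOR operand. Assuming XOR_MAX_OPS
 * is 16, closing the 16th operand fixes the current CDB's source count
 * at 16, resets addr_count and advances desc_count to the next CDB in
 * the chain; the CDB holding the final operand (index == src_cnt - 1)
 * keeps whatever count it has accumulated by then.
 */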
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * ppc440spe_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) static int ppc440spe_adma_dma2rxor_prep_src(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) struct ppc440spe_adma_desc_slot *hdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) struct ppc440spe_rxor *cursor, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) int src_cnt, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) u32 sign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) struct ppc440spe_adma_desc_slot *desc = hdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) for (i = 0; i < cursor->desc_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) desc = list_entry(desc->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) switch (cursor->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) if (addr == cursor->addrl + cursor->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) /* direct RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) cursor->state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) cursor->xor_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) if (index == src_cnt-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) DMA_RXOR12 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) } else if (cursor->addrl == addr + cursor->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) /* reverse RXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) cursor->state = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) cursor->xor_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) set_bit(cursor->addr_count, &desc->reverse_flags[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (index == src_cnt-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) DMA_RXOR12 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) printk(KERN_ERR "Cannot build DMA2 RXOR command block.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) sign = test_bit(cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) desc->reverse_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (index == src_cnt-2 || (sign == -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) && addr != cursor->addrl - 2*cursor->len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) cursor->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) cursor->xor_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) cursor->addrl = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) DMA_RXOR12 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) } else if (addr == cursor->addrl + 2*sign*cursor->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) cursor->state = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) cursor->xor_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) DMA_RXOR123 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (index == src_cnt-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) } else if (addr == cursor->addrl + 3*cursor->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) cursor->state = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) cursor->xor_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) DMA_RXOR124 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (index == src_cnt-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) } else if (addr == cursor->addrl + 4*cursor->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) cursor->state = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) cursor->xor_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) DMA_RXOR125 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) if (index == src_cnt-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) cursor->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) cursor->xor_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) cursor->addrl = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) ppc440spe_rxor_set_region(desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) cursor->addr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) DMA_RXOR12 << DMA_CUED_REGION_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) cursor->state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) cursor->addrl = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) cursor->xor_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) if (index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) ppc440spe_adma_dma2rxor_inc_addr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) desc, cursor, index, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
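/*
 * Illustrative example (assumes the caller has set cursor->len to the
 * transfer length): starting from the reset state (2), three sources
 * placed contiguously at addr, addr + len and addr + 2*len drive the
 * cursor through states 2 -> 0 -> 1 -> 2 and are encoded as a single
 * DMA_RXOR123 operand; two contiguous sources alone would be encoded
 * as DMA_RXOR12.
 */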
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * ppc440spe_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) static void ppc440spe_adma_dma2rxor_set_src(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) int index, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) struct xor_cb *xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) int k = 0, op = 0, lop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) /* get the RXOR operand that corresponds to the addr at index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) while (op <= index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) lop = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) if (k == XOR_MAX_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) desc = list_entry(desc->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) struct ppc440spe_adma_desc_slot, chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) (DMA_RXOR12 << DMA_CUED_REGION_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) op += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) op += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) BUG_ON(k < 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) if (test_bit(k-1, desc->reverse_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) /* reverse operand order; put last op in RXOR group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) if (index == op - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) ppc440spe_rxor_set_src(desc, k - 1, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) /* direct operand order; put first op in RXOR group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (index == lop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) ppc440spe_rxor_set_src(desc, k - 1, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
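/*
 * Illustrative example (assumed operand layout): the walk above advances
 * 'op' by the number of sources each operand spans, 2 for an RXOR12
 * region and 3 otherwise. E.g. with operands [RXOR123, RXOR12], source
 * index 3 leaves the loop with k == 2 and lop == 3, i.e. it is the first
 * source of the second operand.
 */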
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) * ppc440spe_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) * ppc440spe_adma_dma2rxor_prep_src() has already been called prior to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) static void ppc440spe_adma_dma2rxor_set_mult(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) struct ppc440spe_adma_desc_slot *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) int index, u8 mult)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) struct xor_cb *xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) int k = 0, op = 0, lop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) /* get the RXOR operand that corresponds to the mult at index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) while (op <= index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) lop = op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (k == XOR_MAX_OPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) desc = list_entry(desc->chain_node.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) struct ppc440spe_adma_desc_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) xcb = desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) (DMA_RXOR12 << DMA_CUED_REGION_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) op += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) op += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) BUG_ON(k < 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) if (test_bit(k-1, desc->reverse_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) /* reverse order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) ppc440spe_rxor_set_mult(desc, k - 1, op - index - 1, mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) /* direct order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) ppc440spe_rxor_set_mult(desc, k - 1, index - lop, mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) * ppc440spe_init_rxor_cursor - reset an RXOR cursor to its initial state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) static void ppc440spe_init_rxor_cursor(struct ppc440spe_rxor *cursor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) memset(cursor, 0, sizeof(struct ppc440spe_rxor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) cursor->state = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) }
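/*
 * Usage sketch (illustrative; 'cursor', 'hdesc', 'src_cnt' and 'addrs'
 * are hypothetical caller variables):
 *
 *	struct ppc440spe_rxor cursor;
 *	int i;
 *
 *	ppc440spe_init_rxor_cursor(&cursor);
 *	for (i = 0; i < src_cnt; i++)
 *		ppc440spe_adma_dma2rxor_prep_src(hdesc, &cursor, i,
 *						 src_cnt, addrs[i]);
 */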
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) * ppc440spe_adma_pq_set_src_mult - set multiplication coefficient into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) * descriptor for the PQXOR operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) static void ppc440spe_adma_pq_set_src_mult(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) struct ppc440spe_adma_desc_slot *sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) unsigned char mult, int index, int dst_pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) u32 mult_idx, mult_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) chan = to_ppc440spe_adma_chan(sw_desc->async_tx.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) switch (chan->device->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) if (test_bit(PPC440SPE_DESC_RXOR, &sw_desc->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) int region = test_bit(PPC440SPE_DESC_RXOR12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) &sw_desc->flags) ? 2 : 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (index < region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) /* RXOR multipliers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) sw_desc->dst_cnt - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (sw_desc->dst_cnt == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) iter1 = ppc440spe_get_group_entry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) sw_desc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) mult_dst = DMA_CDB_SG_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) /* WXOR multiplier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) index - region +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) sw_desc->dst_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) mult_idx = DMA_CUED_MULT1_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) DMA_CDB_SG_DST1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) int znum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) /* WXOR-only;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) * skip the first slots, which zero the destinations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) * (when ZERO_P/ZERO_Q are set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) if (test_bit(PPC440SPE_ZERO_P, &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) znum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (test_bit(PPC440SPE_ZERO_Q, &sw_desc->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) znum++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) iter = ppc440spe_get_group_entry(sw_desc, index + znum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) mult_idx = DMA_CUED_MULT1_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if (likely(iter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) ppc440spe_desc_set_src_mult(iter, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) mult_idx, mult_dst, mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (unlikely(iter1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) /* if we have two destinations for RXOR, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) * we've just set the Q mult. Set up P now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) ppc440spe_desc_set_src_mult(iter1, chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) mult_idx, mult_dst, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) iter = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) if (sw_desc->dst_cnt == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) /* both P & Q calculations required; set P mult here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) ppc440spe_adma_dma2rxor_set_mult(iter, index, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) /* and then set Q mult */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) iter = ppc440spe_get_group_entry(sw_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) sw_desc->descs_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) ppc440spe_adma_dma2rxor_set_mult(iter, index, mult);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) }
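/*
 * Illustrative note (assuming the CUED multipliers are byte-wide): in
 * the RXOR branch above mult_idx = DMA_CUED_MULT1_OFF + (index << 3),
 * so each source's GF multiplier lands a further 8 bits up: index 0 at
 * +0, index 1 at +8, index 2 at +16.
 */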
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) * ppc440spe_adma_free_chan_resources - free the resources allocated for the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) struct ppc440spe_adma_desc_slot *iter, *_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) int in_use_descs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) ppc440spe_adma_slot_cleanup(ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) spin_lock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) in_use_descs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) list_del(&iter->chain_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) list_for_each_entry_safe_reverse(iter, _iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) &ppc440spe_chan->all_slots, slot_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) list_del(&iter->slot_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) kfree(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) ppc440spe_chan->slots_allocated--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) ppc440spe_chan->last_used = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) "ppc440spe adma%d %s slots_allocated %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) ppc440spe_chan->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) __func__, ppc440spe_chan->slots_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) spin_unlock_bh(&ppc440spe_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) /* one is OK since we left it there on purpose */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) if (in_use_descs > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) printk(KERN_ERR "SPE: Freeing %d in use descriptors!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) in_use_descs - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) * @chan: ADMA channel handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) * @cookie: ADMA transaction identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) * @txstate: a holder for the current state of the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) dma_cookie_t cookie, struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) ppc440spe_adma_slot_cleanup(ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) return dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) }
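/*
 * Usage sketch (illustrative; 'chan' and 'cookie' are assumed to come
 * from a dmaengine client that called dmaengine_submit()):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *
 * DMA_COMPLETE means the descriptor finished and its results are
 * visible; any other status means it is still in flight.
 */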
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) * ppc440spe_adma_eot_handler - end of transfer interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) static irqreturn_t ppc440spe_adma_eot_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) struct ppc440spe_adma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) dev_dbg(chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) "ppc440spe adma%d: %s\n", chan->device->id, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) tasklet_schedule(&chan->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) ppc440spe_adma_device_clear_eot_status(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) * ppc440spe_adma_err_handler - DMA error interrupt handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) * do the same things as an EOT handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) static irqreturn_t ppc440spe_adma_err_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) struct ppc440spe_adma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) dev_dbg(chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) "ppc440spe adma%d: %s\n", chan->device->id, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) tasklet_schedule(&chan->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) ppc440spe_adma_device_clear_eot_status(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) * ppc440spe_test_callback - called when the test operation completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) static void ppc440spe_test_callback(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) complete(&ppc440spe_r6_test_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) * ppc440spe_adma_issue_pending - flush all pending descriptors to h/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) dev_dbg(ppc440spe_chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) __func__, ppc440spe_chan->pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) if (ppc440spe_chan->pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) ppc440spe_chan->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) ppc440spe_chan_append(ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) }
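/*
 * Usage sketch (illustrative): descriptors queued via tx_submit() stay
 * pending until issue_pending() kicks the engine, so a client typically
 * does:
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */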
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) * ppc440spe_chan_start_null_xor - initiate the first XOR operation (DMA engines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) * use FIFOs, as opposed to the descriptor chains used by XOR, so this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) * an XOR-specific operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) struct ppc440spe_adma_desc_slot *sw_desc, *group_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) int slot_cnt, slots_per_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) dev_dbg(chan->device->common.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) "ppc440spe adma%d: %s\n", chan->device->id, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) slot_cnt = ppc440spe_chan_xor_slot_count(0, 2, &slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) sw_desc = ppc440spe_adma_alloc_slots(chan, slot_cnt, slots_per_op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) group_start = sw_desc->group_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) list_splice_init(&sw_desc->group_list, &chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) async_tx_ack(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) ppc440spe_desc_init_null_xor(group_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) cookie = dma_cookie_assign(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) /* initialize the completed cookie to be less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) * the most recently used cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) chan->common.completed_cookie = cookie - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) /* channel should not be busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) BUG_ON(ppc440spe_chan_is_busy(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) /* set the descriptor address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) ppc440spe_chan_set_first_xor_descriptor(chan, sw_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) /* run the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) ppc440spe_chan_run(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) printk(KERN_ERR "ppc440spe adma%d failed to allocate null descriptor\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) chan->device->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) * ppc440spe_test_raid6 - test whether the RAID-6 capabilities were enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) * successfully. For this we just perform one WXOR operation with the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) * source and destination addresses and a GF-multiplier of 1; if the RAID-6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) * capabilities are enabled then we'll get src/dst filled with zeros.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) static int ppc440spe_test_raid6(struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) struct ppc440spe_adma_desc_slot *sw_desc, *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) struct page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) char *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) dma_addr_t dma_addr, addrs[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) unsigned long op = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) set_bit(PPC440SPE_DESC_WXOR, &op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) pg = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) if (!pg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) sw_desc = ppc440spe_adma_alloc_slots(chan, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) if (sw_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) /* 1 src, 1 dst, int_ena, WXOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) ppc440spe_desc_init_dma01pq(sw_desc, 1, 1, 1, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) iter->unmap_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) rval = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) /* Fill the test page with ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) memset(page_address(pg), 0xFF, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) dma_addr = dma_map_page(chan->device->dev, pg, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) /* Setup addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) ppc440spe_adma_pq_set_src(sw_desc, dma_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) ppc440spe_adma_pq_set_src_mult(sw_desc, 1, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) addrs[0] = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) addrs[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) ppc440spe_adma_pq_set_dest(sw_desc, addrs, DMA_PREP_PQ_DISABLE_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) async_tx_ack(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) sw_desc->async_tx.callback = ppc440spe_test_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) sw_desc->async_tx.callback_param = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) init_completion(&ppc440spe_r6_test_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) ppc440spe_adma_tx_submit(&sw_desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) ppc440spe_adma_issue_pending(&chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) wait_for_completion(&ppc440spe_r6_test_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) /* Now check if the test page is zeroed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) a = page_address(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if ((*(u32 *)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) /* page is zero - RAID-6 enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) /* RAID-6 was not enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) rval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) __free_page(pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) }
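/*
 * Illustrative arithmetic (not driver code): with source == destination
 * and a GF multiplier of 1, the WXOR computes dst = dst ^ (1 * src),
 * i.e. src ^ src, so every byte of the 0xFF-filled page must read back
 * as 0xFF ^ 0xFF == 0x00 when the RAID-6 datapath is really enabled,
 * which is exactly what the zero check above verifies.
 */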
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) switch (adev->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) dma_cap_set(DMA_PQ, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) dma_cap_set(DMA_XOR, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) dma_cap_set(DMA_PQ, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) /* Set base routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) adev->common.device_alloc_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) ppc440spe_adma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) adev->common.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) ppc440spe_adma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) adev->common.device_tx_status = ppc440spe_adma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) /* Set prep routines based on capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) adev->common.device_prep_dma_memcpy =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) ppc440spe_adma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) adev->common.max_xor = XOR_MAX_OPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) adev->common.device_prep_dma_xor =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) ppc440spe_adma_prep_dma_xor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) switch (adev->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) dma_set_maxpq(&adev->common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) DMA0_FIFO_SIZE / sizeof(struct dma_cdb), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) dma_set_maxpq(&adev->common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) DMA1_FIFO_SIZE / sizeof(struct dma_cdb), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) case PPC440SPE_XOR_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) adev->common.max_pq = XOR_MAX_OPS * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) adev->common.device_prep_dma_pq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) ppc440spe_adma_prep_dma_pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) switch (adev->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) adev->common.max_pq = DMA0_FIFO_SIZE /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) sizeof(struct dma_cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) adev->common.max_pq = DMA1_FIFO_SIZE /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) sizeof(struct dma_cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) adev->common.device_prep_dma_pq_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) ppc440spe_adma_prep_dma_pqzero_sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) switch (adev->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) case PPC440SPE_DMA0_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) adev->common.max_xor = DMA0_FIFO_SIZE /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) sizeof(struct dma_cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) case PPC440SPE_DMA1_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) adev->common.max_xor = DMA1_FIFO_SIZE /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) sizeof(struct dma_cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) adev->common.device_prep_dma_xor_val =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) ppc440spe_adma_prep_dma_xor_zero_sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) adev->common.device_prep_dma_interrupt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) ppc440spe_adma_prep_dma_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) "( %s%s%s%s%s%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) dev_name(adev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "intr " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) }
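/*
 * Usage sketch (illustrative): a dmaengine client would typically pick
 * a channel by one of the capabilities advertised above, e.g.:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_PQ, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 */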
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) struct ppc440spe_adma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) int *initcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) struct platform_device *ofdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) ofdev = container_of(adev->dev, struct platform_device, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) if (adev->id != PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) adev->err_irq = irq_of_parse_and_map(np, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) if (!adev->err_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) dev_warn(adev->dev, "no err irq resource?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) *initcode = PPC_ADMA_INIT_IRQ2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) adev->err_irq = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) atomic_inc(&ppc440spe_adma_err_irq_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) adev->err_irq = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) adev->irq = irq_of_parse_and_map(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) if (!adev->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) dev_err(adev->dev, "no irq resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) *initcode = PPC_ADMA_INIT_IRQ1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) goto err_irq_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) dev_dbg(adev->dev, "irq %d, err irq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) adev->irq, adev->err_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) ret = request_irq(adev->irq, ppc440spe_adma_eot_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 0, dev_driver_string(adev->dev), chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) dev_err(adev->dev, "can't request irq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) adev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) *initcode = PPC_ADMA_INIT_IRQ1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) goto err_req1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) /* only DMA engines have a separate error IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) * so it's OK if err_irq < 0 in the XOR engine case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) if (adev->err_irq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) /* both DMA engines share common error IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) ret = request_irq(adev->err_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) ppc440spe_adma_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) dev_driver_string(adev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) dev_err(adev->dev, "can't request irq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) adev->err_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) *initcode = PPC_ADMA_INIT_IRQ2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) goto err_req2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) if (adev->id == PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) /* enable XOR engine interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) iowrite32be(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) &adev->xor_reg->ier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) u32 mask, enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) if (!np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) pr_err("%s: can't find I2O device tree node\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) goto err_req2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) adev->i2o_reg = of_iomap(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) if (!adev->i2o_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) pr_err("%s: failed to map I2O registers\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) goto err_req2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) /* Unmask 'CS FIFO Attention' interrupts and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) * enable generating interrupts on errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) enable = (adev->id == PPC440SPE_DMA0_ID) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) ~(I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) ~(I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) mask = ioread32(&adev->i2o_reg->iopim) & enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) iowrite32(mask, &adev->i2o_reg->iopim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) err_req2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) free_irq(adev->irq, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) err_req1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) irq_dispose_mapping(adev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) err_irq_map:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if (adev->err_irq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) irq_dispose_mapping(adev->err_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) static void ppc440spe_adma_release_irqs(struct ppc440spe_adma_device *adev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) struct ppc440spe_adma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) u32 mask, disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) if (adev->id == PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) /* disable XOR engine interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) mask = ioread32be(&adev->xor_reg->ier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) mask &= ~(XOR_IE_CBCIE_BIT | XOR_IE_ICBIE_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) iowrite32be(mask, &adev->xor_reg->ier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) /* disable DMAx engine interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) disable = (adev->id == PPC440SPE_DMA0_ID) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) (I2O_IOPIM_P0SNE | I2O_IOPIM_P0EM) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) (I2O_IOPIM_P1SNE | I2O_IOPIM_P1EM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) mask = ioread32(&adev->i2o_reg->iopim) | disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) iowrite32(mask, &adev->i2o_reg->iopim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) free_irq(adev->irq, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) irq_dispose_mapping(adev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) if (adev->err_irq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) free_irq(adev->err_irq, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) if (atomic_dec_and_test(&ppc440spe_adma_err_irq_ref)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) irq_dispose_mapping(adev->err_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) iounmap(adev->i2o_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
/**
 * ppc440spe_adma_probe - probe the asynchronous DMA device
 * @ofdev: platform device to probe
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) static int ppc440spe_adma_probe(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) struct ppc440spe_adma_device *adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) struct ppc440spe_adma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) struct ppc_dma_chan_ref *ref, *_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) int ret = 0, initcode = PPC_ADMA_INIT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) const u32 *idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) void *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) u32 id, pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) if (of_device_is_compatible(np, "amcc,xor-accelerator")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) id = PPC440SPE_XOR_ID;
		/* The XOR engine does not use FIFOs; it walks a linked
		 * list of descriptors, so the pool size to allocate does
		 * not depend on the engine configuration.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) pool_size = PAGE_SIZE << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) /* it is DMA0 or DMA1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) idx = of_get_property(np, "cell-index", &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) if (!idx || (len != sizeof(u32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) dev_err(&ofdev->dev, "Device node %pOF has missing "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) "or invalid cell-index property\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) id = *idx;
		/* The DMA0/1 engines maintain CDBs in a FIFO, so the
		 * pool must be sized according to the FIFO depth: the
		 * pool should provide as many CDBs as the FIFO can
		 * hold CDB pointers.
		 * That is
		 *	CDB size = 32B;
		 *	CDBs number = (DMA0_FIFO_SIZE >> 3);
		 *	Pool size = CDBs number * CDB size =
		 *	= (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) pool_size = (id == PPC440SPE_DMA0_ID) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) pool_size <<= 2;
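		/* Worked example (illustrative only; the real value of
		 * DMA0_FIFO_SIZE comes from adma.h): a hypothetical
		 * 0x1000-byte FIFO holds 0x1000 >> 3 = 512 CDB pointers,
		 * so the pool must provide 512 CDBs of 32 bytes each,
		 * i.e. 512 * 32 = 16384 bytes = DMA0_FIFO_SIZE << 2.
		 */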
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) if (of_address_to_resource(np, 0, &res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) dev_err(&ofdev->dev, "failed to get memory resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) initcode = PPC_ADMA_INIT_MEMRES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) if (!request_mem_region(res.start, resource_size(&res),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) dev_driver_string(&ofdev->dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) dev_err(&ofdev->dev, "failed to request memory region %pR\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) initcode = PPC_ADMA_INIT_MEMREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) /* create a device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) adev = kzalloc(sizeof(*adev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) if (!adev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) initcode = PPC_ADMA_INIT_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) goto err_adev_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) adev->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) adev->pool_size = pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) /* allocate coherent memory for hardware descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) adev->pool_size, &adev->dma_desc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) if (adev->dma_desc_pool_virt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) dev_err(&ofdev->dev, "failed to allocate %d bytes of coherent "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) "memory for hardware descriptors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) adev->pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) initcode = PPC_ADMA_INIT_COHERENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) goto err_dma_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) regs = ioremap(res.start, resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) if (!regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) dev_err(&ofdev->dev, "failed to ioremap regs!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) goto err_regs_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) if (adev->id == PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) adev->xor_reg = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) /* Reset XOR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) iowrite32be(XOR_CRSR_XASR_BIT, &adev->xor_reg->crsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) iowrite32be(XOR_CRSR_64BA_BIT, &adev->xor_reg->crrr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) size_t fifo_size = (adev->id == PPC440SPE_DMA0_ID) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) DMA0_FIFO_SIZE : DMA1_FIFO_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) adev->dma_reg = regs;
		/* DMAx_FIFO_SIZE is defined in bytes, while the <fsiz>
		 * field is expressed in CDB pointers (8 bytes each).
		 * DMA FIFO Length = CSlength + CPlength, where
		 * CSlength = CPlength = (fsiz + 1) * 8.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) iowrite32(DMA_FIFO_ENABLE | ((fifo_size >> 3) - 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) &adev->dma_reg->fsiz);
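		/* Illustrative check (again assuming a hypothetical
		 * 0x1000-byte FIFO): <fsiz> is programmed to
		 * (0x1000 >> 3) - 2 = 510, so CSlength = CPlength =
		 * (510 + 1) * 8 = 4088 bytes, and the engine uses
		 * 2 * 4088 = 8176 bytes, which fits in the doubled FIFO
		 * buffer reserved in ppc440spe_configure_raid_devices().
		 */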
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) /* Configure DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) iowrite32(DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) &adev->dma_reg->cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) /* Clear Status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) iowrite32(~0, &adev->dma_reg->dsts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) adev->dev = &ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) adev->common.dev = &ofdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) INIT_LIST_HEAD(&adev->common.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) platform_set_drvdata(ofdev, adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) /* create a channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) chan = kzalloc(sizeof(*chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) if (!chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) initcode = PPC_ADMA_INIT_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) goto err_chan_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) spin_lock_init(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) INIT_LIST_HEAD(&chan->chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) INIT_LIST_HEAD(&chan->all_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) chan->device = adev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) chan->common.device = &adev->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) dma_cookie_init(&chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) list_add_tail(&chan->common.device_node, &adev->common.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) tasklet_setup(&chan->irq_tasklet, ppc440spe_adma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) /* allocate and map helper pages for async validation or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) * async_mult/async_sum_product operations on DMA0/1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) if (adev->id != PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) chan->pdest_page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) chan->qdest_page = alloc_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) if (!chan->pdest_page ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) !chan->qdest_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) if (chan->pdest_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) __free_page(chan->pdest_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) if (chan->qdest_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) __free_page(chan->qdest_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) goto err_page_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) chan->pdest = dma_map_page(&ofdev->dev, chan->pdest_page, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) chan->qdest = dma_map_page(&ofdev->dev, chan->qdest_page, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) ref = kmalloc(sizeof(*ref), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (ref) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) ref->chan = &chan->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) INIT_LIST_HEAD(&ref->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) list_add_tail(&ref->node, &ppc440spe_adma_chan_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) dev_err(&ofdev->dev, "failed to allocate channel reference!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) goto err_ref_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) ret = ppc440spe_adma_setup_irqs(adev, chan, &initcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) goto err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) ppc440spe_adma_init_capabilities(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) ret = dma_async_device_register(&adev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) initcode = PPC_ADMA_INIT_REGISTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) dev_err(&ofdev->dev, "failed to register dma device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) goto err_dev_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) err_dev_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) ppc440spe_adma_release_irqs(adev, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) err_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) if (chan == to_ppc440spe_adma_chan(ref->chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) list_del(&ref->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) err_ref_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) if (adev->id != PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) dma_unmap_page(&ofdev->dev, chan->pdest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) dma_unmap_page(&ofdev->dev, chan->qdest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) __free_page(chan->pdest_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) __free_page(chan->qdest_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) err_page_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) kfree(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) err_chan_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (adev->id == PPC440SPE_XOR_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) iounmap(adev->xor_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) iounmap(adev->dma_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) err_regs_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) dma_free_coherent(adev->dev, adev->pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) adev->dma_desc_pool_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) adev->dma_desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) err_dma_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) kfree(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) err_adev_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) release_mem_region(res.start, resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) out:
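	/* Record the init status so it can be reported through the
	 * sysfs "devices" attribute (see devices_show() below).
	 */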
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) if (id < PPC440SPE_ADMA_ENGINES_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) ppc440spe_adma_devices[id] = initcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232)
/**
 * ppc440spe_adma_remove - remove the asynchronous DMA device
 * @ofdev: platform device to remove
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) static int ppc440spe_adma_remove(struct platform_device *ofdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) struct ppc440spe_adma_device *adev = platform_get_drvdata(ofdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) struct device_node *np = ofdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) struct dma_chan *chan, *_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) struct ppc_dma_chan_ref *ref, *_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) struct ppc440spe_adma_chan *ppc440spe_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) if (adev->id < PPC440SPE_ADMA_ENGINES_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) ppc440spe_adma_devices[adev->id] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) dma_async_device_unregister(&adev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) list_for_each_entry_safe(chan, _chan, &adev->common.channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) ppc440spe_chan = to_ppc440spe_adma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) ppc440spe_adma_release_irqs(adev, ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) tasklet_kill(&ppc440spe_chan->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) if (adev->id != PPC440SPE_XOR_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) dma_unmap_page(&ofdev->dev, ppc440spe_chan->pdest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) dma_unmap_page(&ofdev->dev, ppc440spe_chan->qdest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) __free_page(ppc440spe_chan->pdest_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) __free_page(ppc440spe_chan->qdest_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) list_for_each_entry_safe(ref, _ref, &ppc440spe_adma_chan_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) if (ppc440spe_chan ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) to_ppc440spe_adma_chan(ref->chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) list_del(&ref->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) kfree(ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) list_del(&chan->device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) kfree(ppc440spe_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) dma_free_coherent(adev->dev, adev->pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) adev->dma_desc_pool_virt, adev->dma_desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) if (adev->id == PPC440SPE_XOR_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) iounmap(adev->xor_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) iounmap(adev->dma_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) of_address_to_resource(np, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) release_mem_region(res.start, resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) kfree(adev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
/*
 * /sys driver interface to enable h/w RAID-6 capabilities.
 * The files created in e.g. the /sys/devices/plb.0/400100100.dma0/driver/
 * directory are "devices", "enable" and "poly".
 * "devices" shows the available engines.
 * "enable" is used to enable the RAID-6 capabilities or to check
 * whether they have been activated.
 * "poly" allows setting/checking the polynomial used (PPC440SPe only).
 */
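
/*
 * A hypothetical session (the exact sysfs path, engine ids and the key
 * value are platform specific and shown for illustration only):
 *
 *	# cat /sys/devices/plb.0/400100100.dma0/driver/devices
 *	PPC440SP(E)-ADMA.0: ok
 *	# echo <key> > /sys/devices/plb.0/400100100.dma0/driver/enable
 *	# cat /sys/devices/plb.0/400100100.dma0/driver/enable
 *	PPC440SP(e) RAID-6 capabilities are ENABLED.
 *	# echo 0x11d > /sys/devices/plb.0/400100100.dma0/driver/poly
 */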
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) static ssize_t devices_show(struct device_driver *dev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) ssize_t size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) if (ppc440spe_adma_devices[i] == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) size += scnprintf(buf + size, PAGE_SIZE - size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) "PPC440SP(E)-ADMA.%d: %s\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) ppc_adma_errors[ppc440spe_adma_devices[i]]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) static DRIVER_ATTR_RO(devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) static ssize_t enable_show(struct device_driver *dev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) return snprintf(buf, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) ppc440spe_r6_enabled ? "EN" : "DIS");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) static ssize_t enable_store(struct device_driver *dev, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) if (!count || count > 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) if (!ppc440spe_r6_tchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) /* Write a key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) sscanf(buf, "%lx", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) isync();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) /* Verify whether it really works now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) if (ppc440spe_test_raid6(ppc440spe_r6_tchan) == 0) {
		pr_info("PPC440SP(e) RAID-6 has been activated successfully\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) ppc440spe_r6_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) } else {
		pr_info("PPC440SP(e) RAID-6 has not been activated! Wrong key?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) ppc440spe_r6_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) static DRIVER_ATTR_RW(enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) static ssize_t poly_show(struct device_driver *dev, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) ssize_t size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) #ifdef CONFIG_440SP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) /* 440SP has fixed polynomial */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) reg = 0x4d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) reg >>= MQ0_CFBHL_POLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) reg &= 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
	size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
			"uses the 0x1%02x polynomial.\n", reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) static ssize_t poly_store(struct device_driver *dev, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) unsigned long reg, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) #ifdef CONFIG_440SP
	/* The 440SP supports only the default 0x14D polynomial */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) if (!count || count > 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) /* e.g., 0x14D or 0x11D */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) sscanf(buf, "%lx", &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) if (val & ~0x1FF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) val &= 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) reg = dcr_read(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) reg &= ~(0xFF << MQ0_CFBHL_POLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) reg |= val << MQ0_CFBHL_POLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) static DRIVER_ATTR_RW(poly);
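
/*
 * Encoding note, derived from the handlers above: the polynomial is
 * stored as its low byte only, the leading 1 of the 9-bit value being
 * implied.  Writing "0x14d" passes the (val & ~0x1FF) check, is reduced
 * to 0x4d in the MQ0_CFBHL field, and poly_show() re-adds the implicit
 * leading 1 when printing the value back as 0x14d.
 */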
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)
/*
 * Common initialization for the RAID engines; allocate memory for the
 * DMAx FIFOs and perform the configuration common to all DMA engines.
 * Further DMA-engine-specific configuration is done at probe time.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) static int ppc440spe_configure_raid_devices(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) struct resource i2o_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) struct i2o_regs __iomem *i2o_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) dcr_host_t i2o_dcr_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) unsigned int dcr_base, dcr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) np = of_find_compatible_node(NULL, NULL, "ibm,i2o-440spe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (!np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) pr_err("%s: can't find I2O device tree node\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) if (of_address_to_resource(np, 0, &i2o_res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) i2o_reg = of_iomap(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) if (!i2o_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) pr_err("%s: failed to map I2O registers\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) /* Get I2O DCRs base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) dcr_base = dcr_resource_start(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) dcr_len = dcr_resource_len(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) if (!dcr_base && !dcr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) pr_err("%pOF: can't get DCR registers base/len!\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) iounmap(i2o_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) i2o_dcr_host = dcr_map(np, dcr_base, dcr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if (!DCR_MAP_OK(i2o_dcr_host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) pr_err("%pOF: failed to map DCRs!\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) iounmap(i2o_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449)
	/* Provide memory regions for the DMA FIFOs: I2O, DMA0 and DMA1 share
	 * the base address of the FIFO memory space.
	 * We actually need twice as much physical memory as is programmed in
	 * the <fsiz> register, because each DMA has two FIFOs: CP and CS.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) ppc440spe_dma_fifo_buf = kmalloc((DMA0_FIFO_SIZE + DMA1_FIFO_SIZE) << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) if (!ppc440spe_dma_fifo_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) pr_err("%s: DMA FIFO buffer allocation failed.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) iounmap(i2o_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) dcr_unmap(i2o_dcr_host, dcr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) }
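
	/* Sizing example (illustrative; the FIFO sizes are defined in
	 * adma.h): with DMA0_FIFO_SIZE = DMA1_FIFO_SIZE = 0x1000 bytes,
	 * the buffer above is (0x1000 + 0x1000) << 1 = 16 KiB, covering
	 * the CP and CS FIFOs of both DMA engines.
	 */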
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) * Configure h/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) /* Reset I2O/DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) mtdcri(SDR0, DCRN_SDR0_SRST, DCRN_SDR0_SRST_I2ODMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) mtdcri(SDR0, DCRN_SDR0_SRST, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) /* Setup the base address of mmaped registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) dcr_write(i2o_dcr_host, DCRN_I2O0_IBAH, (u32)(i2o_res.start >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) dcr_write(i2o_dcr_host, DCRN_I2O0_IBAL, (u32)(i2o_res.start) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) I2O_REG_ENABLE);
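	/* For example, with the illustrative 0x4_00100100 base used in the
	 * sysfs path comment above, IBAH receives the upper word 0x4 and
	 * IBAL receives 0x00100100 with I2O_REG_ENABLE or'ed in.
	 */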
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) dcr_unmap(i2o_dcr_host, dcr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) /* Setup FIFO memory space base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) iowrite32(0, &i2o_reg->ifbah);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) iowrite32(((u32)__pa(ppc440spe_dma_fifo_buf)), &i2o_reg->ifbal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
	/* Set a zero FIFO size for I2O, so that the whole
	 * ppc440spe_dma_fifo_buf is used by the DMAs.
	 * The DMAx FIFOs are configured at probe time.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) iowrite32(0, &i2o_reg->ifsiz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) iounmap(i2o_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487)
	/* To prepare the WXOR/RXOR functionality we need access to the
	 * Memory Queue Module DCRs (this is finally enabled via the /sys
	 * interface of the ppc440spe ADMA driver).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) np = of_find_compatible_node(NULL, NULL, "ibm,mq-440spe");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) if (!np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) pr_err("%s: can't find MQ device tree node\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) /* Get MQ DCRs base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) dcr_base = dcr_resource_start(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) dcr_len = dcr_resource_len(np, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) if (!dcr_base && !dcr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) pr_err("%pOF: can't get DCR registers base/len!\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) goto out_mq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) ppc440spe_mq_dcr_host = dcr_map(np, dcr_base, dcr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) if (!DCR_MAP_OK(ppc440spe_mq_dcr_host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) pr_err("%pOF: failed to map DCRs!\n", np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) goto out_mq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) ppc440spe_mq_dcr_len = dcr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) /* Set HB alias */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) /* Set:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) * - LL transaction passing limit to 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) * - Memory controller cycle limit to 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) * - Galois Polynomial to 0x14d (default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_CFBHL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) (1 << MQ0_CFBHL_TPLM) | (1 << MQ0_CFBHL_HBCL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) (PPC440SPE_DEFAULT_POLY << MQ0_CFBHL_POLY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) atomic_set(&ppc440spe_adma_err_irq_ref, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) ppc440spe_adma_devices[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) out_mq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) of_node_put(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) kfree(ppc440spe_dma_fifo_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) static const struct of_device_id ppc440spe_adma_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) { .compatible = "ibm,dma-440spe", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) { .compatible = "amcc,xor-accelerator", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) MODULE_DEVICE_TABLE(of, ppc440spe_adma_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) static struct platform_driver ppc440spe_adma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) .probe = ppc440spe_adma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) .remove = ppc440spe_adma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) .name = "PPC440SP(E)-ADMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) .of_match_table = ppc440spe_adma_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) static __init int ppc440spe_adma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) ret = ppc440spe_configure_raid_devices();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) ret = platform_driver_register(&ppc440spe_adma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) pr_err("%s: failed to register platform driver\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) goto out_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) /* Initialization status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) ret = driver_create_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) &driver_attr_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) goto out_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) /* RAID-6 h/w enable entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) ret = driver_create_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) &driver_attr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) goto out_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) /* GF polynomial to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) ret = driver_create_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) &driver_attr_poly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) driver_remove_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) &driver_attr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) out_en:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) driver_remove_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) &driver_attr_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) out_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) /* User will not be able to enable h/w RAID-6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) pr_err("%s: failed to create RAID-6 driver interface\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) platform_driver_unregister(&ppc440spe_adma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) out_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) kfree(ppc440spe_dma_fifo_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) static void __exit ppc440spe_adma_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) driver_remove_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) &driver_attr_poly);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) driver_remove_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) &driver_attr_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) driver_remove_file(&ppc440spe_adma_driver.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) &driver_attr_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) platform_driver_unregister(&ppc440spe_adma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) dcr_unmap(ppc440spe_mq_dcr_host, ppc440spe_mq_dcr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) kfree(ppc440spe_dma_fifo_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) arch_initcall(ppc440spe_adma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) module_exit(ppc440spe_adma_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) MODULE_AUTHOR("Yuri Tikhonov <yur@emcraft.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) MODULE_DESCRIPTION("PPC440SPE ADMA Engine Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) MODULE_LICENSE("GPL");