^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (C) 2017 Broadcom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * modify it under the terms of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * published by the Free Software Foundation version 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * This program is distributed "as is" WITHOUT ANY WARRANTY of any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * kind, whether express or implied; without even the implied warranty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * GNU General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Broadcom SBA RAID Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * The Broadcom stream buffer accelerator (SBA) provides offloading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * capabilities for RAID operations. The SBA offload engine is accessible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * via Broadcom SoC specific ring manager. Two or more offload engines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * can share same Broadcom SoC specific ring manager due to this Broadcom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * SoC specific ring manager driver is implemented as a mailbox controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * driver and offload engine drivers are implemented as mallbox clients.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * Typically, Broadcom SoC specific ring manager will implement larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * number of hardware rings over one or more SBA hardware devices. By
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * design, the internal buffer size of SBA hardware device is limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * but all offload operations supported by SBA can be broken down into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * multiple small size requests and executed parallely on multiple SBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * hardware devices for achieving high through-put.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * The Broadcom SBA RAID driver does not require any register programming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * except submitting request to SBA hardware device via mailbox channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * This driver implements a DMA device with one DMA channel using a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * mailbox channel provided by Broadcom SoC specific ring manager driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * For having more SBA DMA channels, we can create more SBA device nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * in Broadcom SoC specific DTS based on number of hardware rings supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * by Broadcom SoC ring manager.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/mailbox_client.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/mailbox/brcm-message.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/raid/pq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* ====== Driver macros and defines ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #define SBA_TYPE_SHIFT 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define SBA_TYPE_MASK GENMASK(1, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define SBA_TYPE_A 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define SBA_TYPE_B 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define SBA_TYPE_C 0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define SBA_USER_DEF_SHIFT 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define SBA_USER_DEF_MASK GENMASK(15, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define SBA_R_MDATA_SHIFT 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define SBA_R_MDATA_MASK GENMASK(7, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define SBA_C_MDATA_MS_SHIFT 18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define SBA_INT_SHIFT 17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define SBA_INT_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define SBA_RESP_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define SBA_RESP_MASK BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define SBA_C_MDATA_SHIFT 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #define SBA_C_MDATA_MASK GENMASK(7, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define SBA_C_MDATA_DNUM_SHIFT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define SBA_CMD_SHIFT 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define SBA_CMD_MASK GENMASK(3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define SBA_CMD_ZERO_BUFFER 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define SBA_CMD_ZERO_ALL_BUFFERS 0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define SBA_CMD_LOAD_BUFFER 0x9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define SBA_CMD_XOR 0xa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #define SBA_CMD_GALOIS_XOR 0xb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define SBA_CMD_WRITE_BUFFER 0xc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define SBA_CMD_GALOIS 0xe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /* Driver helper macros */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define to_sba_request(tx) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) container_of(tx, struct sba_request, tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #define to_sba_device(dchan) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) container_of(dchan, struct sba_device, dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /* ===== Driver data structures ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) enum sba_request_flags {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) SBA_REQUEST_STATE_FREE = 0x001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) SBA_REQUEST_STATE_ALLOCED = 0x002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) SBA_REQUEST_STATE_PENDING = 0x004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) SBA_REQUEST_STATE_ACTIVE = 0x008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) SBA_REQUEST_STATE_ABORTED = 0x010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) SBA_REQUEST_STATE_MASK = 0x0ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) SBA_REQUEST_FENCE = 0x100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct sba_request {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) /* Global state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) struct list_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct sba_device *sba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /* Chained requests management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) struct sba_request *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct list_head next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) atomic_t next_pending_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) /* BRCM message data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) struct brcm_message msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct dma_async_tx_descriptor tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* SBA commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) struct brcm_sba_command cmds[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) enum sba_version {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) SBA_VER_1 = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) SBA_VER_2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct sba_device {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /* Underlying device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* DT configuration parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) enum sba_version ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /* Derived configuration parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) u32 max_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) u32 hw_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) u32 hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) u32 max_pq_coefs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) u32 max_pq_srcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) u32 max_cmd_per_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) u32 max_xor_srcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) u32 max_resp_pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) u32 max_cmds_pool_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /* Maibox client and Mailbox channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) struct mbox_client client;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) struct mbox_chan *mchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct device *mbox_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /* DMA device and DMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct dma_device dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct dma_chan dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /* DMA channel resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) void *resp_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) dma_addr_t resp_dma_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) void *cmds_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) dma_addr_t cmds_dma_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) spinlock_t reqs_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) bool reqs_fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct list_head reqs_alloc_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct list_head reqs_pending_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct list_head reqs_active_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct list_head reqs_aborted_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct list_head reqs_free_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* DebugFS directory entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct dentry *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /* ====== Command helper routines ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) cmd &= ~((u64)mask << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) cmd |= ((u64)(val & mask) << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) return cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) return b0 & SBA_C_MDATA_BNUMx_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) return b0 & SBA_C_MDATA_BNUMx_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) return (b0 & SBA_C_MDATA_BNUMx_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return (b0 & SBA_C_MDATA_BNUMx_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* ====== General helper routines ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) static struct sba_request *sba_alloc_request(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) list_for_each_entry(req, &sba->reqs_free_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) if (async_tx_test_ack(&req->tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) list_move_tail(&req->node, &sba->reqs_alloc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) * We have no more free requests so, we peek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) * mailbox channels hoping few active requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) * would have completed which will create more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * room for new requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) mbox_client_peek_data(sba->mchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) req->flags = SBA_REQUEST_STATE_ALLOCED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) req->first = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) INIT_LIST_HEAD(&req->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) atomic_set(&req->next_pending_count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) async_tx_ack(&req->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) /* Note: Must be called with sba->reqs_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static void _sba_pending_request(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) lockdep_assert_held(&sba->reqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) req->flags &= ~SBA_REQUEST_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) req->flags |= SBA_REQUEST_STATE_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) list_move_tail(&req->node, &sba->reqs_pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) if (list_empty(&sba->reqs_active_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) sba->reqs_fence = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /* Note: Must be called with sba->reqs_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static bool _sba_active_request(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) lockdep_assert_held(&sba->reqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (list_empty(&sba->reqs_active_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) sba->reqs_fence = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) if (sba->reqs_fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) req->flags &= ~SBA_REQUEST_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) req->flags |= SBA_REQUEST_STATE_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) list_move_tail(&req->node, &sba->reqs_active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) if (req->flags & SBA_REQUEST_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) sba->reqs_fence = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) /* Note: Must be called with sba->reqs_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) static void _sba_abort_request(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) lockdep_assert_held(&sba->reqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) req->flags &= ~SBA_REQUEST_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) req->flags |= SBA_REQUEST_STATE_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) list_move_tail(&req->node, &sba->reqs_aborted_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (list_empty(&sba->reqs_active_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) sba->reqs_fence = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) /* Note: Must be called with sba->reqs_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static void _sba_free_request(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) lockdep_assert_held(&sba->reqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) req->flags &= ~SBA_REQUEST_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) req->flags |= SBA_REQUEST_STATE_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) list_move_tail(&req->node, &sba->reqs_free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) if (list_empty(&sba->reqs_active_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) sba->reqs_fence = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) static void sba_free_chained_requests(struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) struct sba_request *nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) struct sba_device *sba = req->sba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) _sba_free_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) list_for_each_entry(nreq, &req->next, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) _sba_free_request(sba, nreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static void sba_chain_request(struct sba_request *first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) struct sba_device *sba = req->sba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) list_add_tail(&req->next, &first->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) req->first = first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) atomic_inc(&first->next_pending_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) static void sba_cleanup_nonpending_requests(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) struct sba_request *req, *req1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) /* Freeup all alloced request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) _sba_free_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /* Set all active requests as aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) _sba_abort_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * Note: We expect that aborted request will be eventually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * freed by sba_receive_message()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static void sba_cleanup_pending_requests(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct sba_request *req, *req1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) /* Freeup all pending request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) _sba_free_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static int sba_send_mbox_request(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) /* Send message for the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) req->msg.error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) ret = mbox_send_message(sba->mchan, &req->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) dev_err(sba->dev, "send message failed with error %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /* Check error returned by mailbox controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) ret = req->msg.error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) dev_err(sba->dev, "message error %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) /* Signal txdone for mailbox channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) mbox_client_txdone(sba->mchan, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /* Note: Must be called with sba->reqs_lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static void _sba_process_pending_requests(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) struct sba_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) /* Process few pending requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) while (!list_empty(&sba->reqs_pending_list) && count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) /* Get the first pending request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) req = list_first_entry(&sba->reqs_pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) struct sba_request, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /* Try to make request active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (!_sba_active_request(sba, req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /* Send request to mailbox channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) ret = sba_send_mbox_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) _sba_pending_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) static void sba_process_received_request(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) struct sba_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct sba_request *nreq, *first = req->first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /* Process only after all chained requests are received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (!atomic_dec_return(&first->next_pending_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) tx = &first->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) WARN_ON(tx->cookie < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if (tx->cookie > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) dma_cookie_complete(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) dmaengine_desc_get_callback_invoke(tx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) dma_descriptor_unmap(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) tx->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) tx->callback_result = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) dma_run_dependencies(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) /* Free all requests chained to first request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) list_for_each_entry(nreq, &first->next, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) _sba_free_request(sba, nreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) INIT_LIST_HEAD(&first->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /* Free the first request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) _sba_free_request(sba, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /* Process pending requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) _sba_process_pending_requests(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) static void sba_write_stats_in_seqfile(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) struct seq_file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) struct sba_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) u32 free_count = 0, alloced_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) u32 pending_count = 0, active_count = 0, aborted_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) list_for_each_entry(req, &sba->reqs_free_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (async_tx_test_ack(&req->tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) free_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) list_for_each_entry(req, &sba->reqs_alloc_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) alloced_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) list_for_each_entry(req, &sba->reqs_pending_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) pending_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) list_for_each_entry(req, &sba->reqs_active_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) active_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) list_for_each_entry(req, &sba->reqs_aborted_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) aborted_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) seq_printf(file, "maximum requests = %d\n", sba->max_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) seq_printf(file, "free requests = %d\n", free_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) seq_printf(file, "alloced requests = %d\n", alloced_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) seq_printf(file, "pending requests = %d\n", pending_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) seq_printf(file, "active requests = %d\n", active_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) seq_printf(file, "aborted requests = %d\n", aborted_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) /* ====== DMAENGINE callbacks ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) static void sba_free_chan_resources(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * Channel resources are pre-alloced so we just free-up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * whatever we can so that we can re-use pre-alloced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * channel resources next time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) sba_cleanup_nonpending_requests(to_sba_device(dchan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) static int sba_device_terminate_all(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) /* Cleanup all pending requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) sba_cleanup_pending_requests(to_sba_device(dchan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) static void sba_issue_pending(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) struct sba_device *sba = to_sba_device(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) /* Process pending requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) _sba_process_pending_requests(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) struct sba_device *sba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) struct sba_request *req, *nreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (unlikely(!tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) sba = to_sba_device(tx->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) req = to_sba_request(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /* Assign cookie and mark all chained requests pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) spin_lock_irqsave(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) _sba_pending_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) list_for_each_entry(nreq, &req->next, next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) _sba_pending_request(sba, nreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) spin_unlock_irqrestore(&sba->reqs_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static enum dma_status sba_tx_status(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) struct sba_device *sba = to_sba_device(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) ret = dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) mbox_client_peek_data(sba->mchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) return dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) static void sba_fillup_interrupt_msg(struct sba_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) struct brcm_sba_command *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) struct brcm_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) u64 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) u32 c_mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) dma_addr_t resp_dma = req->tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) struct brcm_sba_command *cmdsp = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) /* Type-B command to load dummy data into buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) c_mdata = sba_cmd_load_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) cmdsp->data = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) cmdsp->data_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) /* Type-A command to write buf0 to dummy location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) c_mdata = sba_cmd_write_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) cmdsp->data = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) cmdsp->data_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) /* Fillup brcm_message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) msg->type = BRCM_MESSAGE_SBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) msg->sba.cmds = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) msg->sba.cmds_count = cmdsp - cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) msg->ctx = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) msg->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) struct sba_device *sba = to_sba_device(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) /* Alloc new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) req = sba_alloc_request(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * Force fence so that no requests are submitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * until DMA callback for this request is invoked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) req->flags |= SBA_REQUEST_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /* Fillup request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) /* Init async_tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) req->tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) req->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return &req->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static void sba_fillup_memcpy_msg(struct sba_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) struct brcm_sba_command *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct brcm_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) dma_addr_t msg_offset, size_t msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) dma_addr_t dst, dma_addr_t src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) u64 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) u32 c_mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) dma_addr_t resp_dma = req->tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) struct brcm_sba_command *cmdsp = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /* Type-B command to load data into buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) c_mdata = sba_cmd_load_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) cmdsp->data = src + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /* Type-A command to write buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) c_mdata = sba_cmd_write_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) cmdsp->data = dst + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /* Fillup brcm_message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) msg->type = BRCM_MESSAGE_SBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) msg->sba.cmds = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) msg->sba.cmds_count = cmdsp - cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) msg->ctx = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) msg->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) static struct sba_request *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) sba_prep_dma_memcpy_req(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) dma_addr_t off, dma_addr_t dst, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) /* Alloc new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) req = sba_alloc_request(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (flags & DMA_PREP_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) req->flags |= SBA_REQUEST_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /* Fillup request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) off, len, dst, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) /* Init async_tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) req->tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) req->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) size_t req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) dma_addr_t off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct sba_device *sba = to_sba_device(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct sba_request *first = NULL, *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /* Create chained requests where each request is upto hw_buf_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) req = sba_prep_dma_memcpy_req(sba, off, dst, src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) req_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) sba_free_chained_requests(first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) sba_chain_request(first, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) first = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) off += req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) len -= req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return (first) ? &first->tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void sba_fillup_xor_msg(struct sba_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct brcm_sba_command *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct brcm_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) dma_addr_t msg_offset, size_t msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) u64 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) u32 c_mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) dma_addr_t resp_dma = req->tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct brcm_sba_command *cmdsp = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /* Type-B command to load data into buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) c_mdata = sba_cmd_load_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) cmdsp->data = src[0] + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	/* Type-B commands to XOR data with buf0 and put it back in buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) for (i = 1; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) c_mdata = sba_cmd_xor_c_mdata(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) cmdsp->data = src[i] + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /* Type-A command to write buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) c_mdata = sba_cmd_write_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cmdsp->data = dst + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	/* Fill up brcm_message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) msg->type = BRCM_MESSAGE_SBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) msg->sba.cmds = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) msg->sba.cmds_count = cmdsp - cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) msg->ctx = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) msg->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static struct sba_request *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) sba_prep_dma_xor_req(struct sba_device *sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) u32 src_cnt, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* Alloc new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) req = sba_alloc_request(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (flags & DMA_PREP_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) req->flags |= SBA_REQUEST_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	/* Fill up request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) sba_fillup_xor_msg(req, req->cmds, &req->msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) off, len, dst, src, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* Init async_tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) req->tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) req->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) u32 src_cnt, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) size_t req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) dma_addr_t off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) struct sba_device *sba = to_sba_device(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct sba_request *first = NULL, *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (unlikely(src_cnt > sba->max_xor_srcs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	/* Create chained requests where each request is up to hw_buf_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) req_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) sba_free_chained_requests(first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) sba_chain_request(first, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) first = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) off += req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) len -= req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return (first) ? &first->tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
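/*
 * Build the SBA command list for one P+Q request. When continuing an
 * earlier computation, the old P and Q are first loaded into buf0 and
 * buf1; otherwise all buffers are zeroed. Every source is then fed to
 * a GALOIS_XOR command keyed by raid6_gflog[scf[i]], accumulating P in
 * buf0 and Q in buf1, and the buffers are finally written out to the
 * enabled destinations.
 */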
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) static void sba_fillup_pq_msg(struct sba_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) bool pq_continue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct brcm_sba_command *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct brcm_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) dma_addr_t msg_offset, size_t msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dma_addr_t *dst_p, dma_addr_t *dst_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) const u8 *scf, dma_addr_t *src, u32 src_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) u64 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) u32 c_mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dma_addr_t resp_dma = req->tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct brcm_sba_command *cmdsp = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (pq_continue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Type-B command to load old P into buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) if (dst_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) c_mdata = sba_cmd_load_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) cmdsp->data = *dst_p + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* Type-B command to load old Q into buf1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (dst_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) c_mdata = sba_cmd_load_c_mdata(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) cmdsp->data = *dst_q + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* Type-A command to zero all buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) 	/* Type-B commands to generate P onto buf0 and Q onto buf1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) for (i = 0; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) cmdsp->data = src[i] + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Type-A command to write buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (dst_p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) c_mdata = sba_cmd_write_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) cmdsp->data = *dst_p + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Type-A command to write buf1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (dst_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) c_mdata = sba_cmd_write_c_mdata(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) cmdsp->data = *dst_q + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	/* Fill up brcm_message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) msg->type = BRCM_MESSAGE_SBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) msg->sba.cmds = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) msg->sba.cmds_count = cmdsp - cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) msg->ctx = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) msg->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static struct sba_request *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* Alloc new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) req = sba_alloc_request(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (flags & DMA_PREP_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) req->flags |= SBA_REQUEST_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	/* Fill up request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) sba_fillup_pq_msg(req, dmaf_continue(flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) req->cmds, &req->msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) off, len, dst_p, dst_q, scf, src, src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* Init async_tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) req->tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) req->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
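/*
 * Build the SBA command list for a single-source P/Q update. The Q
 * contribution multiplies the source by g^dpos, where dpos is the
 * discrete log of the coefficient; when dpos exceeds what a single
 * GALOIS command can apply (max_pq_coefs - 1), the multiplication is
 * split into several GALOIS commands whose exponents sum to dpos.
 * With illustrative values dpos = 60 and max_pq_coefs = 25, the
 * sequence is g^24 * g^24 * g^12 = g^60.
 */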
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static void sba_fillup_pq_single_msg(struct sba_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) bool pq_continue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct brcm_sba_command *cmds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct brcm_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) dma_addr_t msg_offset, size_t msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) dma_addr_t *dst_p, dma_addr_t *dst_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) dma_addr_t src, u8 scf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) u64 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) u32 c_mdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) u8 pos, dpos = raid6_gflog[scf];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) dma_addr_t resp_dma = req->tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct brcm_sba_command *cmdsp = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (!dst_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto skip_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (pq_continue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /* Type-B command to load old P into buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) c_mdata = sba_cmd_load_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) cmdsp->data = *dst_p + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		 * Type-B command to XOR data with buf0 and put it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		 * back in buf0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) c_mdata = sba_cmd_xor_c_mdata(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) cmdsp->data = src + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		/* Type-B command to load data into buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) c_mdata = sba_cmd_load_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) cmdsp->data = src + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /* Type-A command to write buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) c_mdata = sba_cmd_write_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) cmdsp->data = *dst_p + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) skip_p:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (!dst_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) goto skip_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* Type-A command to zero all buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
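	/*
	 * A raid6_gflog[] value of 255 marks a zero coefficient,
	 * which contributes nothing to Q.
	 */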
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (dpos == 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto skip_q_computation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) pos = (dpos < req->sba->max_pq_coefs) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) dpos : (req->sba->max_pq_coefs - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * Type-B command to generate initial Q from data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * and store output into buf0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) cmdsp->data = src + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dpos -= pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	/* Multiple Type-A commands to generate final Q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) while (dpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) pos = (dpos < req->sba->max_pq_coefs) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) dpos : (req->sba->max_pq_coefs - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		 * Type-A command to generate Q from buf0 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		 * buf1, storing the result back in buf0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) dpos -= pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) skip_q_computation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (pq_continue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * Type-B command to XOR previous output with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * buf0 and write it into buf0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) c_mdata = sba_cmd_xor_c_mdata(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) cmdsp->data = *dst_q + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /* Type-A command to write buf0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) SBA_TYPE_SHIFT, SBA_TYPE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) cmd = sba_cmd_enc(cmd, msg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) cmd = sba_cmd_enc(cmd, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) SBA_RESP_SHIFT, SBA_RESP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) c_mdata = sba_cmd_write_c_mdata(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) SBA_CMD_SHIFT, SBA_CMD_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) cmdsp->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) *cmdsp->cmd_dma = cpu_to_le64(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (req->sba->hw_resp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) cmdsp->resp = resp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) cmdsp->resp_len = req->sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) cmdsp->data = *dst_q + msg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) cmdsp->data_len = msg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) cmdsp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) skip_q:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	/* Fill up brcm_message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) msg->type = BRCM_MESSAGE_SBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) msg->sba.cmds = cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) msg->sba.cmds_count = cmdsp - cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) msg->ctx = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) msg->error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static struct sba_request *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) dma_addr_t *dst_p, dma_addr_t *dst_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) dma_addr_t src, u8 scf, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* Alloc new request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) req = sba_alloc_request(sba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (flags & DMA_PREP_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) req->flags |= SBA_REQUEST_FENCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	/* Fill up request message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) sba_fillup_pq_single_msg(req, dmaf_continue(flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) req->cmds, &req->msg, off, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) dst_p, dst_q, src, scf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Init async_tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) req->tx.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) req->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
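/*
 * Prepare a P+Q descriptor. Each hw_buf_size chunk is normally covered
 * by one multi-source request; if any coefficient's discrete log is
 * beyond max_pq_coefs (the "slow" case), the chunk is instead built
 * from fenced single-source sub-requests.
 */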
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) u32 i, dst_q_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) size_t req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) bool slow = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) dma_addr_t off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) dma_addr_t *dst_p = NULL, *dst_q = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct sba_device *sba = to_sba_device(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct sba_request *first = NULL, *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* Sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (unlikely(src_cnt > sba->max_pq_srcs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) for (i = 0; i < src_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) slow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	/* Figure out P and Q destination addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (!(flags & DMA_PREP_PQ_DISABLE_P))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) dst_p = &dst[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (!(flags & DMA_PREP_PQ_DISABLE_Q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) dst_q = &dst[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	/* Create chained requests where each request is up to hw_buf_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (slow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) dst_q_index = src_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
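			/*
			 * If the Q destination aliases one of the
			 * sources, consume that source first so the
			 * remaining updates see the rewritten Q.
			 */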
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (dst_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) for (i = 0; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (*dst_q == src[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) dst_q_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (dst_q_index < src_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) i = dst_q_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) req = sba_prep_dma_pq_single_req(sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) off, dst_p, dst_q, src[i], scf[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) req_len, flags | DMA_PREP_FENCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) sba_chain_request(first, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) first = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) flags |= DMA_PREP_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) for (i = 0; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (dst_q_index == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) req = sba_prep_dma_pq_single_req(sba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) off, dst_p, dst_q, src[i], scf[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) req_len, flags | DMA_PREP_FENCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) sba_chain_request(first, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) first = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) flags |= DMA_PREP_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) req = sba_prep_dma_pq_req(sba, off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) dst_p, dst_q, src, src_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) scf, req_len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) sba_chain_request(first, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) first = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) off += req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) len -= req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return (first) ? &first->tx : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sba_free_chained_requests(first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* ====== Mailbox callbacks ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void sba_receive_message(struct mbox_client *cl, void *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) struct brcm_message *m = msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) struct sba_request *req = m->ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) struct sba_device *sba = req->sba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	/* Log the error reported with the message, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (m->error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		dev_err(sba->dev, "%s got message with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			dma_chan_name(&sba->dma_chan), m->error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /* Process received request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) sba_process_received_request(sba, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* ====== Debugfs callbacks ====== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) struct sba_device *sba = dev_get_drvdata(file->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	/* Write stats to the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) sba_write_stats_in_seqfile(sba, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /* ====== Platform driver routines ===== */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
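/*
 * Preallocate per-channel resources: one coherent response slot of
 * hw_resp_size bytes per request, plus max_cmd_per_req 64-bit command
 * slots per request carved out of the coherent command pool.
 */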
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static int sba_prealloc_channel_resources(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) int i, j, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct sba_request *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sba->max_resp_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) &sba->resp_dma_base, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!sba->resp_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) sba->max_cmds_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) &sba->cmds_dma_base, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (!sba->cmds_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) goto fail_free_resp_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) spin_lock_init(&sba->reqs_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) sba->reqs_fence = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) INIT_LIST_HEAD(&sba->reqs_alloc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) INIT_LIST_HEAD(&sba->reqs_pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) INIT_LIST_HEAD(&sba->reqs_active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) INIT_LIST_HEAD(&sba->reqs_aborted_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) INIT_LIST_HEAD(&sba->reqs_free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) for (i = 0; i < sba->max_req; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) req = devm_kzalloc(sba->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct_size(req, cmds, sba->max_cmd_per_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) goto fail_free_cmds_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) INIT_LIST_HEAD(&req->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) req->sba = sba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) req->flags = SBA_REQUEST_STATE_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) INIT_LIST_HEAD(&req->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) atomic_set(&req->next_pending_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) for (j = 0; j < sba->max_cmd_per_req; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) req->cmds[j].cmd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) req->cmds[j].cmd_dma = sba->cmds_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) (i * sba->max_cmd_per_req + j) * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) (i * sba->max_cmd_per_req + j) * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) req->cmds[j].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) memset(&req->msg, 0, sizeof(req->msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) async_tx_ack(&req->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) req->tx.tx_submit = sba_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) list_add_tail(&req->node, &sba->reqs_free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) fail_free_cmds_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) dma_free_coherent(sba->mbox_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) sba->max_cmds_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) sba->cmds_base, sba->cmds_dma_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) fail_free_resp_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) dma_free_coherent(sba->mbox_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) sba->max_resp_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) sba->resp_base, sba->resp_dma_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static void sba_freeup_channel_resources(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) dmaengine_terminate_all(&sba->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sba->cmds_base, sba->cmds_dma_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) sba->resp_base, sba->resp_dma_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) sba->resp_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) sba->resp_dma_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
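/*
 * Describe the offload capabilities (interrupt, memcpy, XOR, P+Q) and
 * register this single-channel DMA device with the dmaengine core.
 */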
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static int sba_async_register(struct sba_device *sba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) struct dma_device *dma_dev = &sba->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* Initialize DMA channel cookie */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) sba->dma_chan.device = dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dma_cookie_init(&sba->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* Initialize DMA device capability mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) dma_cap_zero(dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) dma_cap_set(DMA_XOR, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dma_cap_set(DMA_PQ, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	 * Set the mailbox channel device as the base device of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	 * our dma_device because the actual memory accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	 * will be done by the mailbox controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) dma_dev->dev = sba->mbox_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d\n", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}

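/*
 * A minimal sketch of a matching device tree node, assuming a ring
 * manager mailbox controller labelled "raid_mbox" (the label and the
 * mailbox cell values here are hypothetical; the real values depend on
 * the "#mbox-cells" binding of the SoC's ring manager):
 *
 *	raid0: raid@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 0 0x1 0xffff>;
 *	};
 *
 * sba_probe() below requires at least one "mboxes" entry and derives
 * the SBA version from the compatible string.
 */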
static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We could support max_pq_srcs == max_pq_coefs, but we
		 * are limited by the number of SBA commands that fit in
		 * one message for the underlying ring manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);
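	/*
	 * Worked example for SBA_VER_2: max_cmd_per_req = 12 + 3 = 15
	 * and max_xor_srcs = 14, so each request reserves 15 u64 SBA
	 * commands (120 bytes) in the commands pool plus hw_resp_size
	 * (8) bytes in the response pool, both scaled by max_req.
	 */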

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;
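	/*
	 * Non-blocking submission: tx_block = false means
	 * mbox_send_message() returns without waiting, and
	 * knows_txdone = true means this client is expected to report
	 * completion itself (typically via mbox_client_txdone())
	 * rather than rely on the controller; tx_tout only applies to
	 * blocking mode and is therefore left at 0.
	 */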

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan)) {
		/* Nothing to free yet: the channel was never acquired */
		return PTR_ERR(sba->mchan);
	}

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;
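	/*
	 * sba->mbox_dev is the device that performs the actual memory
	 * accesses, so the coherent pools (see
	 * sba_prealloc_channel_resources()) and dma_dev->dev in
	 * sba_async_register() both use it rather than &pdev->dev.
	 */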

	/* Prealloc channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

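	/*
	 * The debugfs entries are best-effort diagnostics: the return
	 * values of the directory and "stats" file creation are
	 * deliberately not checked, since a debugfs failure should
	 * never keep the driver from probing.
	 */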
	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
				    sba_debugfs_stats_show);

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s\n",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");