// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * SCSI RDMA Protocol lib functions
 *
 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
 *
 ******************************************************************************/

#define pr_fmt(fmt) "libsrp: " fmt

#include <linux/printk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <scsi/srp.h>
#include <target/target_core_base.h>
#include "libsrp.h"
#include "ibmvscsi_tgt.h"

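/*
 * srp_iu_pool_alloc() - allocate the IU entry pool for a target queue
 *
 * Allocates @max struct iu_entry items plus a pointer array that backs
 * a kfifo of free entries, binds each entry to its receive buffer from
 * @ring, and pre-fills the fifo so every entry starts out available.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */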
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
			     struct srp_buf **ring)
{
	struct iu_entry *iue;
	int i;

	q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
	if (!q->pool)
		return -ENOMEM;
	q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
	if (!q->items)
		goto free_pool;

	spin_lock_init(&q->lock);
	kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

	for (i = 0, iue = q->items; i < max; i++) {
		kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
		iue->sbuf = ring[i];
		iue++;
	}
	return 0;

free_pool:
	kfree(q->pool);
	return -ENOMEM;
}

static void srp_iu_pool_free(struct srp_queue *q)
{
	kfree(q->items);
	kfree(q->pool);
}

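/*
 * srp_ring_alloc() - allocate a ring of DMA-coherent IU buffers
 *
 * Each of the @max ring slots gets a struct srp_buf whose buf member
 * points at @size bytes of coherent DMA memory, with the bus address
 * recorded in the dma member.  On any failure the partially built ring
 * is torn down and NULL is returned.
 */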
static struct srp_buf **srp_ring_alloc(struct device *dev,
				       size_t max, size_t size)
{
	struct srp_buf **ring;
	int i;

	ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
	if (!ring)
		return NULL;

	for (i = 0; i < max; i++) {
		ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
		if (!ring[i])
			goto out;
		ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
						  GFP_KERNEL);
		if (!ring[i]->buf)
			goto out;
	}
	return ring;

out:
	for (i = 0; i < max && ring[i]; i++) {
		if (ring[i]->buf) {
			dma_free_coherent(dev, size, ring[i]->buf,
					  ring[i]->dma);
		}
		kfree(ring[i]);
	}
	kfree(ring);

	return NULL;
}

static void srp_ring_free(struct device *dev, struct srp_buf **ring,
			  size_t max, size_t size)
{
	int i;

	for (i = 0; i < max; i++) {
		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
		kfree(ring[i]);
	}
	kfree(ring);
}

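/**
 * srp_target_alloc() - set up a target's receive ring and IU pool
 * @target: target instance to initialize
 * @dev: device used for coherent DMA allocations
 * @nr: number of receive buffers / IU entries
 * @iu_size: size in bytes of each information unit buffer
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 *
 * A minimal lifecycle sketch (caller and variable names here are
 * hypothetical, for illustration only; see ibmvscsi_tgt.c for the
 * real call sites):
 *
 *	err = srp_target_alloc(&vscsi->target, &vdev->dev, nr_ius, iu_len);
 *	if (err)
 *		return err;
 *	...
 *	srp_target_free(&vscsi->target);
 */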
int srp_target_alloc(struct srp_target *target, struct device *dev,
		     size_t nr, size_t iu_size)
{
	int err;

	spin_lock_init(&target->lock);

	target->dev = dev;

	target->srp_iu_size = iu_size;
	target->rx_ring_size = nr;
	target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
	if (!target->rx_ring)
		return -ENOMEM;
	err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
	if (err)
		goto free_ring;

	dev_set_drvdata(target->dev, target);
	return 0;

free_ring:
	srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
	return -ENOMEM;
}

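/**
 * srp_target_free() - release everything srp_target_alloc() set up
 * @target: target instance to tear down
 *
 * Clears the driver data pointer, then frees the DMA ring buffers and
 * the IU entry pool.  Should only be called once all outstanding IU
 * entries have been returned with srp_iu_put().
 */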
void srp_target_free(struct srp_target *target)
{
	dev_set_drvdata(target->dev, NULL);
	srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
		      target->srp_iu_size);
	srp_iu_pool_free(&target->iu_queue);
}

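/**
 * srp_iu_get() - take a free IU entry from the target's pool
 * @target: target to allocate the entry from
 *
 * Return: an entry bound to @target with its flags cleared, or NULL if
 * the fifo is empty (all entries are in flight).
 */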
struct iu_entry *srp_iu_get(struct srp_target *target)
{
	struct iu_entry *iue = NULL;

	if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
			     sizeof(void *),
			     &target->iu_queue.lock) != sizeof(void *)) {
		WARN_ONCE(1, "unexpected fifo state");
		return NULL;
	}
	if (!iue)
		return iue;
	iue->target = target;
	iue->flags = 0;
	return iue;
}

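/**
 * srp_iu_put() - return an IU entry to its target's free pool
 * @iue: entry previously obtained with srp_iu_get()
 *
 * Typical request flow (a sketch; the processing step is up to the
 * caller):
 *
 *	iue = srp_iu_get(target);
 *	if (!iue)
 *		return;		(no free entries; back-pressure the client)
 *	... process the IU held in iue->sbuf->buf ...
 *	srp_iu_put(iue);
 */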
void srp_iu_put(struct iu_entry *iue)
{
	kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
			sizeof(void *), &iue->target->iu_queue.lock);
}

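/*
 * srp_direct_data() - move data described by a single direct descriptor
 *
 * When @dma_map is set, the command's scatterlist is mapped around the
 * @rdma_io callback and the transfer length is clamped to the smaller
 * of the SCSI data length and the descriptor length; otherwise the
 * descriptor length is used as-is.
 */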
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
			   enum dma_data_direction dir, srp_rdma_t rdma_io,
			   int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len;

	if (dma_map) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("failed to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			return 0;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
	} else {
		len = be32_to_cpu(md->len);
	}

	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

	return err;
}

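/*
 * srp_indirect_data() - move data described by an indirect descriptor
 *
 * If the whole descriptor list is carried inside the IU (the descriptor
 * counts in @srp_cmd match the table length) it is used directly.
 * Otherwise the external table must first be fetched from the client
 * via @rdma_io into a temporary coherent buffer, which requires both
 * @ext_desc and @dma_map to be set.
 */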
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;
	int err = 0;
	int nmd, nsg = 0, len;

	if (dma_map || ext_desc) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
	}

	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			pr_err("Can't get dma memory %u\n",
			       be32_to_cpu(id->table_desc.len));
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			pr_err("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		pr_err("This command uses an external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("failed to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			err = -EIO;
			goto free_mem;
		}
		len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
	} else {
		len = be32_to_cpu(id->len);
	}

	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
	if (token && dma_map) {
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);
	}
	return err;
}

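/*
 * data_out_desc_size() - bytes occupied by the data-out descriptor
 *
 * The upper nibble of srp_cmd->buf_fmt encodes the data-out buffer
 * format, the lower nibble the data-in format.  In add_data the
 * data-out descriptor sits after any additional CDB and before the
 * data-in descriptor, so readers of the data-in descriptor advance
 * their offset by this size.
 */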
static int data_out_desc_size(struct srp_cmd *cmd)
{
	int size = 0;
	u8 fmt = cmd->buf_fmt >> 4;

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		size = sizeof(struct srp_direct_buf);
		break;
	case SRP_DATA_DESC_INDIRECT:
		size = sizeof(struct srp_indirect_buf) +
			sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
		break;
	default:
		pr_err("client error: invalid data_out_format %x\n", fmt);
		break;
	}
	return size;
}

/*
 * TODO: this can be called multiple times for a single command if it
 * has very long data.
 */
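/**
 * srp_transfer_data() - run the RDMA transfer described by an SRP command
 * @cmd: target command whose scatterlist supplies or receives the data
 * @srp_cmd: SRP IU carrying the buffer-format field and descriptors
 * @rdma_io: callback that performs the actual RDMA read or write
 * @dma_map: non-zero to map/unmap the command's scatterlist here
 * @ext_desc: non-zero if fetching an external descriptor table is allowed
 *
 * Picks the descriptor format that matches the transfer direction and
 * dispatches to the direct or indirect handler.
 *
 * Return: 0 on success or when there is nothing to transfer, negative
 * errno otherwise.
 */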
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
		      srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	enum dma_data_direction dir;
	int offset, err = 0;
	u8 format;

	if (!cmd->se_cmd.t_data_nents)
		return 0;

	offset = srp_cmd->add_cdb_len & ~3;

	dir = srp_cmd_direction(srp_cmd);
	if (dir == DMA_FROM_DEVICE)
		offset += data_out_desc_size(srp_cmd);

	if (dir == DMA_TO_DEVICE)
		format = srp_cmd->buf_fmt >> 4;
	else
		format = srp_cmd->buf_fmt & ((1U << 4) - 1);

	switch (format) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
		err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
		err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
					ext_desc);
		break;
	default:
		pr_err("Unknown format %d %x\n", dir, format);
		err = -EINVAL;
	}

	return err;
}

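/**
 * srp_data_length() - data length advertised by an SRP command's descriptor
 * @cmd: SRP command IU to inspect
 * @dir: transfer direction selecting the data-out or data-in descriptor
 *
 * Return: the length field of the direct or indirect descriptor for
 * @dir, or 0 if the command carries no data descriptor.
 */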
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
	struct srp_direct_buf *md;
	struct srp_indirect_buf *id;
	u64 len = 0;
	uint offset = cmd->add_cdb_len & ~3;
	u8 fmt;

	if (dir == DMA_TO_DEVICE) {
		fmt = cmd->buf_fmt >> 4;
	} else {
		fmt = cmd->buf_fmt & ((1U << 4) - 1);
		offset += data_out_desc_size(cmd);
	}

	switch (fmt) {
	case SRP_NO_DATA_DESC:
		break;
	case SRP_DATA_DESC_DIRECT:
		md = (struct srp_direct_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(md->len);
		break;
	case SRP_DATA_DESC_INDIRECT:
		id = (struct srp_indirect_buf *)(cmd->add_data + offset);
		len = be32_to_cpu(id->len);
		break;
	default:
		pr_err("invalid data format %x\n", fmt);
		break;
	}
	return len;
}

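/**
 * srp_get_desc_table() - derive direction and length from an SRP command
 * @srp_cmd: SRP command IU to parse
 * @dir: set to DMA_FROM_DEVICE, DMA_TO_DEVICE or DMA_NONE
 * @data_len: set to the descriptor's advertised data length, or 0
 *
 * Unlike srp_data_length(), this also reports the transfer direction
 * implied by which half of buf_fmt is non-zero.
 *
 * Return: always 0.
 */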
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		       u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	uint add_cdb_offset;
	int rc;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	rc = 0;
	*data_len = 0;

	*dir = DMA_NONE;

	if (srp_cmd->buf_fmt & 0xf)
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		*dir = DMA_TO_DEVICE;

	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);

		*data_len = be32_to_cpu(idb->len);
	}
	return rc;
}

MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");