^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Virtio SCSI HBA driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright IBM Corp. 2010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright Red Hat, Inc. 2011
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Paolo Bonzini <pbonzini@redhat.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/mempool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/virtio_ids.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/virtio_config.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/virtio_scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <scsi/scsi_devinfo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/seqlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/blk-mq-virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "sd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define VIRTIO_SCSI_MEMPOOL_SZ 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define VIRTIO_SCSI_EVENT_LEN 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define VIRTIO_SCSI_VQ_BASE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) /* Command queue element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) struct virtio_scsi_cmd {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) struct scsi_cmnd *sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) struct completion *comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) struct virtio_scsi_cmd_req cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct virtio_scsi_cmd_req_pi cmd_pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) struct virtio_scsi_ctrl_tmf_req tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) struct virtio_scsi_ctrl_an_req an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) } req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct virtio_scsi_cmd_resp cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct virtio_scsi_ctrl_tmf_resp tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct virtio_scsi_ctrl_an_resp an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct virtio_scsi_event evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) } resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) } ____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) struct virtio_scsi_event_node {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct virtio_scsi *vscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) struct virtio_scsi_event event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct virtio_scsi_vq {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) /* Protects vq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) spinlock_t vq_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /* Driver instance state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) struct virtio_scsi {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct virtio_device *vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) /* Get some buffers ready for event vq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) u32 num_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct hlist_node node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /* Protected by event_vq lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) bool stop_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) struct virtio_scsi_vq ctrl_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) struct virtio_scsi_vq event_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) struct virtio_scsi_vq req_vqs[];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static struct kmem_cache *virtscsi_cmd_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static mempool_t *virtscsi_cmd_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) if (resid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) scsi_set_resid(sc, resid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * Called with vq_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) struct virtio_scsi_cmd *cmd = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) struct scsi_cmnd *sc = cmd->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) dev_dbg(&sc->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) "cmd %p response %u status %#02x sense_len %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) sc, resp->response, resp->status, resp->sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) sc->result = resp->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) switch (resp->response) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) case VIRTIO_SCSI_S_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) set_host_byte(sc, DID_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) case VIRTIO_SCSI_S_OVERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) set_host_byte(sc, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) case VIRTIO_SCSI_S_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) set_host_byte(sc, DID_ABORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) case VIRTIO_SCSI_S_BAD_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) set_host_byte(sc, DID_BAD_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) case VIRTIO_SCSI_S_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) set_host_byte(sc, DID_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) case VIRTIO_SCSI_S_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) set_host_byte(sc, DID_BUS_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) case VIRTIO_SCSI_S_TARGET_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) set_host_byte(sc, DID_TARGET_FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) case VIRTIO_SCSI_S_NEXUS_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) set_host_byte(sc, DID_NEXUS_FAILURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) scmd_printk(KERN_WARNING, sc, "Unknown response %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) resp->response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) case VIRTIO_SCSI_S_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) set_host_byte(sc, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) VIRTIO_SCSI_SENSE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) if (sc->sense_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) memcpy(sc->sense_buffer, resp->sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) min_t(u32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) virtio32_to_cpu(vscsi->vdev, resp->sense_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) VIRTIO_SCSI_SENSE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (resp->sense_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) set_driver_byte(sc, DRIVER_SENSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) sc->scsi_done(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static void virtscsi_vq_done(struct virtio_scsi *vscsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct virtio_scsi_vq *virtscsi_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) void (*fn)(struct virtio_scsi *vscsi, void *buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) struct virtqueue *vq = virtscsi_vq->vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) virtqueue_disable_cb(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) fn(vscsi, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) if (unlikely(virtqueue_is_broken(vq)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) } while (!virtqueue_enable_cb(vq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) static void virtscsi_req_done(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) struct virtio_scsi *vscsi = shost_priv(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) int index = vq->index - VIRTIO_SCSI_VQ_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) int i, num_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) num_vqs = vscsi->num_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) for (i = 0; i < num_vqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) virtscsi_complete_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) struct virtio_scsi_cmd *cmd = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) if (cmd->comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) complete(cmd->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static void virtscsi_ctrl_done(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct virtio_scsi *vscsi = shost_priv(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) static void virtscsi_handle_event(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) static int virtscsi_kick_event(struct virtio_scsi *vscsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) struct virtio_scsi_event_node *event_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) INIT_WORK(&event_node->work, virtscsi_handle_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) virtqueue_kick(vscsi->event_vq.vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) vscsi->event_list[i].vscsi = vscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) /* Stop scheduling work before calling cancel_work_sync. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) spin_lock_irq(&vscsi->event_vq.vq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) vscsi->stop_events = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) spin_unlock_irq(&vscsi->event_vq.vq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) cancel_work_sync(&vscsi->event_list[i].work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) struct virtio_scsi_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) unsigned int target = event->lun[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) unsigned int lun = (event->lun[2] << 8) | event->lun[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) case VIRTIO_SCSI_EVT_RESET_RESCAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) if (lun == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) scsi_scan_target(&shost->shost_gendev, 0, target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) scsi_add_device(shost, 0, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) case VIRTIO_SCSI_EVT_RESET_REMOVED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) sdev = scsi_device_lookup(shost, 0, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) pr_err("SCSI device %d 0 %d %d not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) shost->host_no, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct virtio_scsi_event *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) unsigned int target = event->lun[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) unsigned int lun = (event->lun[2] << 8) | event->lun[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) sdev = scsi_device_lookup(shost, 0, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (!sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) pr_err("SCSI device %d 0 %d %d not found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) shost->host_no, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) /* Handle "Parameters changed", "Mode parameters changed", and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) "Capacity data has changed". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) scsi_rescan_device(&sdev->sdev_gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) unsigned char scsi_cmd[MAX_COMMAND_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) int result, inquiry_len, inq_result_len = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) char *inq_result = kmalloc(inq_result_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) shost_for_each_device(sdev, shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) memset(scsi_cmd, 0, sizeof(scsi_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) scsi_cmd[0] = INQUIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) scsi_cmd[4] = (unsigned char) inquiry_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) memset(inq_result, 0, inq_result_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) inq_result, inquiry_len, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) SD_TIMEOUT, SD_MAX_RETRIES, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) if (result == 0 && inq_result[0] >> 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) /* PQ indicates the LUN is not attached */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) } else if (host_byte(result) == DID_BAD_TARGET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) * If all LUNs of a virtio-scsi device are unplugged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * it will respond with BAD TARGET on any INQUIRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * Remove the device in this case as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) kfree(inq_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) static void virtscsi_handle_event(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) struct virtio_scsi_event_node *event_node =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) container_of(work, struct virtio_scsi_event_node, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) struct virtio_scsi *vscsi = event_node->vscsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) struct virtio_scsi_event *event = &event_node->event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (event->event &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) event->event &= ~cpu_to_virtio32(vscsi->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) VIRTIO_SCSI_T_EVENTS_MISSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) virtscsi_rescan_hotunplug(vscsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) scsi_scan_host(virtio_scsi_host(vscsi->vdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) case VIRTIO_SCSI_T_NO_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) case VIRTIO_SCSI_T_TRANSPORT_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) virtscsi_handle_transport_reset(vscsi, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) case VIRTIO_SCSI_T_PARAM_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) virtscsi_handle_param_change(vscsi, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) pr_err("Unsupported virtio scsi event %x\n", event->event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) virtscsi_kick_event(vscsi, event_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct virtio_scsi_event_node *event_node = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) if (!vscsi->stop_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) queue_work(system_freezable_wq, &event_node->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) static void virtscsi_event_done(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) struct virtio_scsi *vscsi = shost_priv(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) static int __virtscsi_add_cmd(struct virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct virtio_scsi_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) size_t req_size, size_t resp_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) struct scsi_cmnd *sc = cmd->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct scatterlist *sgs[6], req, resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct sg_table *out, *in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) unsigned out_num = 0, in_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) out = in = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) if (sc && sc->sc_data_direction != DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (sc->sc_data_direction != DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) out = &sc->sdb.table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (sc->sc_data_direction != DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) in = &sc->sdb.table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) /* Request header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) sg_init_one(&req, &cmd->req, req_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) sgs[out_num++] = &req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* Data-out buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) /* Place WRITE protection SGLs before Data OUT payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (scsi_prot_sg_count(sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) sgs[out_num++] = scsi_prot_sglist(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) sgs[out_num++] = out->sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /* Response header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) sg_init_one(&resp, &cmd->resp, resp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) sgs[out_num + in_num++] = &resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /* Data-in buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) if (in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) /* Place READ protection SGLs before Data IN payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (scsi_prot_sg_count(sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) sgs[out_num + in_num++] = scsi_prot_sglist(sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) sgs[out_num + in_num++] = in->sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) static void virtscsi_kick_vq(struct virtio_scsi_vq *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) bool needs_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) spin_lock_irqsave(&vq->vq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) needs_kick = virtqueue_kick_prepare(vq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) spin_unlock_irqrestore(&vq->vq_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (needs_kick)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) virtqueue_notify(vq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it
 * @vq : the struct virtio_scsi_vq we're talking about
 * @cmd : command structure
 * @req_size : size of the request buffer
 * @resp_size : size of the response buffer
 * @kick : whether to kick the virtqueue immediately
 *
 * Return: 0 on success, or the negative error returned by
 * __virtscsi_add_cmd() / virtqueue_add_sgs() (e.g. -EIO when the
 * virtqueue is broken).
 */
static int virtscsi_add_cmd(struct virtio_scsi_vq *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size,
			    bool kick)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	/* Only prepare a kick if the buffers were actually queued. */
	if (!err && kick)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	/* Notify the host outside the lock to keep the critical section short. */
	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
/*
 * Fill in the common virtio-scsi command request header for @sc.
 *
 * The lun[] bytes follow the virtio-scsi single-level LUN format:
 * byte 0 is 1, byte 1 is the target id, and bytes 2-3 carry the LUN
 * with the 0x40 bit marking flat-space addressing.
 */
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	/*
	 * The tag is the scsi_cmnd pointer itself; virtscsi_abort() sends
	 * the same value so the host can identify the task to abort.
	 */
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518)
#ifdef CONFIG_BLK_DEV_INTEGRITY
/*
 * Fill in the T10-PI variant of the request header: the common header
 * plus the number of protection-information bytes transferred in each
 * direction.  Only used when VIRTIO_SCSI_F_T10_PI was negotiated.
 */
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = sc->request;
	struct blk_integrity *bi;

	/* req_pi begins with the plain header; fill that part first. */
	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	/* No protection SGLs attached: leave pi_bytesout/pi_bytesin zero. */
	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->rq_disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
						      bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
						     bio_integrity_bytes(bi,
							blk_rq_sectors(rq)));
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) u32 tag = blk_mq_unique_tag(sc->request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) u16 hwq = blk_mq_unique_tag_to_hwq(tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return &vscsi->req_vqs[hwq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
/*
 * virtscsi_queuecommand - queue a SCSI command on one of the request vqs
 * @shost: virtio-scsi host
 * @sc: command to submit
 *
 * Builds the request header (T10-PI variant when the feature was
 * negotiated), adds the command to the hw queue's virtqueue, and kicks
 * the queue only for SCMD_LAST commands; earlier commands in a batch are
 * kicked later via virtscsi_commit_rqs().
 *
 * Return: 0 (including the -EIO case, which is completed as BAD_TARGET),
 * or SCSI_MLQUEUE_HOST_BUSY when the virtqueue has no room.
 */
static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	bool kick;
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	kick = (sc->flags & SCMD_LAST) != 0;
	ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick);
	if (ret == -EIO) {
		/* Broken virtqueue: complete the command as BAD_TARGET. */
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		/* Typically a full ring: ask the midlayer to retry later. */
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
/*
 * virtscsi_tmf - send a task-management request and wait for its response
 * @vscsi: the virtio-scsi adapter
 * @cmd: command allocated from virtscsi_cmd_pool, with req.tmf filled in
 *
 * Sleeps until the host completes the TMF.  @cmd is freed back to
 * virtscsi_cmd_pool in all cases, including submission failure.
 *
 * Return: SUCCESS when the host reports OK/FUNCTION_SUCCEEDED, FAILED
 * otherwise (including when the request could not be queued).
 */
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd,
			     sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, sc->scsi_done will do nothing, because
	 * the block layer must have detected a timeout and as a result
	 * REQ_ATOM_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
/*
 * virtscsi_device_reset - SCSI EH device-reset handler
 * @sc: a command addressed to the device being reset
 *
 * Sends a LOGICAL UNIT RESET TMF for sc's device.  The cmd is freed by
 * virtscsi_tmf().
 *
 * Return: SUCCESS or FAILED for the SCSI error-handling core.
 */
static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	/* GFP_NOIO: we may be called from the block layer's EH path. */
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					     VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		/* Same single-level LUN encoding as virtio_scsi_init_hdr(). */
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static int virtscsi_device_alloc(struct scsi_device *sdevice)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * may have transfer limits which come from the host SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * controller or something on the host side other than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * target itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * To make this work properly, the hypervisor can adjust the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * target's VPD information to advertise these limits. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * for that to work, the guest has to look at the VPD pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * which we won't do by default if it is an SPC-2 device, even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * if it does actually support it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * So, set the blist to always try to read the VPD pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * @sdev: Virtscsi target whose queue depth to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @qdepth: New queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) int max_depth = shost->cmd_per_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) static int virtscsi_abort(struct scsi_cmnd *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) struct virtio_scsi *vscsi = shost_priv(sc->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct virtio_scsi_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) scmd_printk(KERN_INFO, sc, "abort\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (!cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) memset(cmd, 0, sizeof(*cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) .type = VIRTIO_SCSI_T_TMF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) .lun[0] = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) .lun[1] = sc->device->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) .lun[2] = (sc->device->lun >> 8) | 0x40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) .lun[3] = sc->device->lun & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) return virtscsi_tmf(vscsi, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) static int virtscsi_map_queues(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct virtio_scsi *vscsi = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct virtio_scsi *vscsi = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) virtscsi_kick_vq(&vscsi->req_vqs[hwq]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal. Reset the timer
 * unconditionally to give the host a chance to perform EH.
 *
 * @scmnd is unused: the decision does not depend on the command.
 */
static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return BLK_EH_RESET_TIMER;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
/*
 * SCSI host template for virtio-scsi HBAs.  Per-command driver data is a
 * struct virtio_scsi_cmd (see cmd_size); sg_tablesize, can_queue and the
 * queue counts are filled in at probe time from the device config space.
 */
static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.commit_rqs = virtscsi_commit_rqs,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
/* Read one field of the virtio-scsi config space (native endianness). */
#define virtscsi_config_get(vdev, fld) \
	({ \
		__virtio_native_type(struct virtio_scsi_config, fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

/* Write one field of the virtio-scsi config space. */
#define virtscsi_config_set(vdev, fld, val) \
	do { \
		__virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) spin_lock_init(&virtscsi_vq->vq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) virtscsi_vq->vq = vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
/* Reset the device (which stops all virtqueues), then delete the vqs. */
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
/*
 * virtscsi_init - discover the virtqueues and write our config settings
 * @vdev: the virtio device
 * @vscsi: adapter state (num_queues must already be set)
 *
 * Allocates temporary callback/name arrays, finds control + event +
 * num_queues request virtqueues, initializes the per-vq locks, and
 * advertises our CDB/sense sizes in the config space.
 *
 * Return: 0 on success, negative errno on failure (in which case any
 * virtqueues that were set up are torn down again).
 */
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	/* Keep control/event on dedicated vectors; spread the rest. */
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
				  GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	/* vq 0 is control, vq 1 is event, the rest are request queues. */
	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	/* The temporary arrays are only needed for virtio_find_vqs(). */
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
/*
 * virtscsi_probe - set up a virtio-scsi adapter and register its SCSI host
 * @vdev: the virtio device being probed
 *
 * Reads queue/target/limit information from the config space, allocates
 * the Scsi_Host (with a trailing req_vqs[] array sized by num_queues),
 * initializes the virtqueues, and registers and scans the host.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	/* Config space access is required below; bail out early without it. */
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
	/* No point in more request queues than CPUs. */
	num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				struct_size(vscsi, req_vqs, num_queues));
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	/* Queue depth is bounded by the ring size of a request vq. */
	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/* LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	/* From here on the device may raise interrupts/events. */
	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static void virtscsi_remove(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct Scsi_Host *shost = virtio_scsi_host(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct virtio_scsi *vscsi = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) virtscsi_cancel_event_work(vscsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) scsi_remove_host(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) virtscsi_remove_vqs(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) scsi_host_put(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) static int virtscsi_freeze(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) virtscsi_remove_vqs(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static int virtscsi_restore(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct Scsi_Host *sh = virtio_scsi_host(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct virtio_scsi *vscsi = shost_priv(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) err = virtscsi_init(vdev, vscsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) virtio_device_ready(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) virtscsi_kick_event_all(vscsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static struct virtio_device_id id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) { 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static unsigned int features[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) VIRTIO_SCSI_F_HOTPLUG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) VIRTIO_SCSI_F_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) #ifdef CONFIG_BLK_DEV_INTEGRITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) VIRTIO_SCSI_F_T10_PI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static struct virtio_driver virtio_scsi_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .feature_table = features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) .feature_table_size = ARRAY_SIZE(features),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .driver.name = KBUILD_MODNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) .driver.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .id_table = id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .probe = virtscsi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .freeze = virtscsi_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) .restore = virtscsi_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) .remove = virtscsi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static int __init init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (!virtscsi_cmd_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) virtscsi_cmd_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) virtscsi_cmd_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!virtscsi_cmd_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ret = register_virtio_driver(&virtio_scsi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) mempool_destroy(virtscsi_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) virtscsi_cmd_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) kmem_cache_destroy(virtscsi_cmd_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) virtscsi_cmd_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static void __exit fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) unregister_virtio_driver(&virtio_scsi_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mempool_destroy(virtscsi_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) kmem_cache_destroy(virtscsi_cmd_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) module_init(init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) module_exit(fini);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) MODULE_DEVICE_TABLE(virtio, id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) MODULE_DESCRIPTION("Virtio SCSI HBA driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) MODULE_LICENSE("GPL");