/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Iovec for the virtio-scsi response header */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links w.r.t. explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
	struct list_head tmf_queue;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

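/*
 * Fixed virtqueue indices per the virtio-scsi layout: vq 0 is the control
 * queue (task management functions and asynchronous notifications), vq 1 is
 * the event queue used for hotplug/hotunplug events, and vqs 2..N carry
 * SCSI requests.
 */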
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
			      (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET 256
#define VHOST_SCSI_MAX_VQ 128
#define VHOST_SCSI_MAX_EVENT 128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any given time, one reference tracks newly submitted commands,
	 * while we wait for the other one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

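/*
 * State for a single task management function (e.g. a LUN reset). A free tmf
 * is taken from the owning tpg's tmf_queue, submitted to the target core as
 * an se_cmd, and its response code (scsi_resp, filled in by
 * vhost_scsi_queue_tm_rsp()) is sent back on the control virtqueue described
 * by resp_iov/in_iovs/vq_desc from the vwork handler.
 * vhost_scsi_release_tmf_res() returns the tmf to tmf_queue.
 */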
struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;
	struct list_head queue_entry;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

static struct workqueue_struct *vhost_scsi_workqueue;

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

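/*
 * Start a new inflight generation on every virtqueue: under vq->mutex, flip
 * inflight_idx so that new commands take references on the fresh counter,
 * and hand the previous counter back to the caller via old_inflight[]. A
 * flush then drops its initial reference on each old counter and waits on
 * old_inflight->comp until every command started before the flip completes.
 */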
static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store the old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* set up the new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_tpg *tpg = tmf->tpg;
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	mutex_lock(&tpg->tv_tpg_mutex);
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);
	vhost_scsi_put_inflight(inflight);
}

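/*
 * Called by the target core when a command (or TMF) is released. The vring
 * may only be touched from the vhost worker with the owner's mm, so instead
 * of completing here, TMFs re-queue their vwork and regular commands are
 * pushed onto vs_completion_list for vs_completion_work to service.
 */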
static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
				struct vhost_scsi_tmf, se_cmd);

		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi *vs = cmd->tvc_vhost;

		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
	}
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

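/*
 * Deliver one hotplug/hotunplug event to the guest: grab a descriptor from
 * the event virtqueue, copy the virtio_scsi_event into it and signal the
 * guest. If no buffer is available (or the backend is gone), set
 * vs_events_missed so the next event carries VIRTIO_SCSI_T_EVENTS_MISSED.
 */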
static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			 cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

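/*
 * Allocate a command slot for this virtqueue: pick a free tag from the
 * per-vq sbitmap, reset the corresponding vhost_scsi_cmd while keeping its
 * preallocated sgl/prot_sgl/upages arrays, and take an inflight reference so
 * a flush will wait for this command to complete.
 */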
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags, 0, false);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
				   VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	iov_iter_advance(iter, bytes);

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

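/*
 * Walk the whole iov_iter, filling scatterlist entries with pinned user
 * pages. On failure, drop the references on any pages already mapped so the
 * caller sees either a fully mapped scatterlist or none at all.
 */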
static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);
				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}

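/*
 * Map the command's data payload, and optionally its T10-PI protection
 * payload, from guest memory into the preallocated scatterlists. "write"
 * means the device writes into guest memory (DMA_FROM_DEVICE).
 */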
static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

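/*
 * Runs on the vhost_scsi_workqueue (cmwq): hands the prepared command off to
 * the target core via target_submit_cmd_map_sgls(). On submission failure,
 * report a CHECK CONDITION (LOGICAL UNIT COMMUNICATION FAILURE) back to the
 * guest and free the command.
 */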
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static void vhost_scsi_submission_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) struct vhost_scsi_cmd *cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) container_of(work, struct vhost_scsi_cmd, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct vhost_scsi_nexus *tv_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /* FIXME: BIDI operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (cmd->tvc_sgl_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) sg_ptr = cmd->tvc_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (cmd->tvc_prot_sgl_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) sg_prot_ptr = cmd->tvc_prot_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) se_cmd->prot_pto = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) sg_ptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) tv_nexus = cmd->tvc_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) se_cmd->tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) cmd->tvc_lun, cmd->tvc_exp_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) cmd->tvc_prot_sgl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) transport_send_check_condition_and_sense(se_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) transport_generic_free_cmd(se_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
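/*
 * Post a VIRTIO_SCSI_S_BAD_TARGET response for a request that could not
 * be mapped to a valid target, and complete the descriptor.
 */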
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) vhost_scsi_send_bad_target(struct vhost_scsi *vs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct vhost_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) int head, unsigned out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct virtio_scsi_cmd_resp __user *resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct virtio_scsi_cmd_resp rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) memset(&rsp, 0, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) resp = vq->iov[out].iov_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ret = __copy_to_user(resp, &rsp, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) vhost_add_used_and_signal(&vs->dev, vq, head, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) pr_err("Faulted on virtio_scsi_cmd_resp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct vhost_scsi_ctx *vc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) vc->head = vhost_get_vq_desc(vq, vq->iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) vc->head, vc->out, vc->in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* On error, stop handling until the next kick. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (unlikely(vc->head < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Nothing new? Wait for eventfd to tell us they refilled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (vc->head == vq->num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) vhost_disable_notify(&vs->dev, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Get the size of request and response buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * FIXME: Not correct for BIDI operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) vc->out_size = iov_length(vq->iov, vc->out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * Copy over the virtio-scsi request header, which for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * ANY_LAYOUT enabled guest may span multiple iovecs, or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * single iovec may contain both the header + outgoing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * WRITE payloads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * copy_from_iter() will advance out_iter, so that it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * point at the start of the outgoing WRITE payload, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * DMA_TO_DEVICE is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (unlikely(vc->in_size < vc->rsp_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) vq_err(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) "Response buf too small, need min %zu bytes got %zu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) vc->rsp_size, vc->in_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) } else if (unlikely(vc->out_size < vc->req_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) vq_err(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) "Request buf too small, need min %zu bytes got %zu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) vc->req_size, vc->out_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
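/*
 * Copy the virtio-scsi request header out of the descriptor and, when the
 * request addresses a target, look up the corresponding tpg.  Returns 0 on
 * success and -EIO on a malformed header or unknown target.
 */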
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct vhost_scsi_tpg **tpgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) &vc->out_iter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) vq_err(vq, "Faulted on copy_from_iter_full\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) } else if (unlikely(*vc->lunp != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* virtio-scsi spec requires byte 0 of the lun to be 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct vhost_scsi_tpg **vs_tpg, *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) vs_tpg = vhost_vq_get_backend(vq); /* validated at handler entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 		tpg = vc->target ? READ_ONCE(vs_tpg[*vc->target]) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 		if (unlikely(vc->target && !tpg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (tpgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *tpgp = tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
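/*
 * Decode the LUN from bytes 2 and 3 of the virtio-scsi 8-byte LUN field,
 * masking off the address method bits.
 */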
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static u16 vhost_buf_to_lun(u8 *lun_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
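/*
 * Service the request virtqueue: fetch descriptors, parse the virtio-scsi
 * command headers, map any data payload to scatterlists, and queue each
 * command for submission to the target core.
 */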
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct vhost_scsi_tpg **vs_tpg, *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct virtio_scsi_cmd_req v_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct virtio_scsi_cmd_req_pi v_req_pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) struct vhost_scsi_ctx vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct vhost_scsi_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct iov_iter in_iter, prot_iter, data_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) u64 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) u32 exp_data_len, data_direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) int ret, prot_bytes, c = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) u16 lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) u8 task_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) void *cdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 	 * We can handle the vq only after the endpoint is set up by calling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * VHOST_SCSI_SET_ENDPOINT ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) vs_tpg = vhost_vq_get_backend(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (!vs_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) memset(&vc, 0, sizeof(vc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) vhost_disable_notify(&vs->dev, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ret = vhost_scsi_get_desc(vs, vq, &vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * Setup pointers and values based upon different virtio-scsi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * request header if T10_PI is enabled in KVM guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (t10_pi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) vc.req = &v_req_pi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) vc.req_size = sizeof(v_req_pi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) vc.lunp = &v_req_pi.lun[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) vc.target = &v_req_pi.lun[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) vc.req = &v_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) vc.req_size = sizeof(v_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) vc.lunp = &v_req.lun[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) vc.target = &v_req.lun[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * Validate the size of request and response buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * Check for a sane response buffer so we can report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * early errors back to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) ret = vhost_scsi_chk_size(vq, &vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) ret = vhost_scsi_get_req(vq, &vc, &tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ret = -EIO; /* bad target on any error from here on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * Determine data_direction by calculating the total outgoing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * response headers respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * For DMA_TO_DEVICE this is out_iter, which is already pointing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * to the right place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * For DMA_FROM_DEVICE, the iovec will be just past the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * of the virtio-scsi response header in either the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * or immediately following iovec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * Any associated T10_PI bytes for the outgoing / incoming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * payloads are included in calculation of exp_data_len here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) prot_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (vc.out_size > vc.req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) data_direction = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) exp_data_len = vc.out_size - vc.req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) data_iter = vc.out_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) } else if (vc.in_size > vc.rsp_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) data_direction = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) exp_data_len = vc.in_size - vc.rsp_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) vc.rsp_size + exp_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) iov_iter_advance(&in_iter, vc.rsp_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) data_iter = in_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) data_direction = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) exp_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * If T10_PI header + payload is present, setup prot_iter values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * and recalculate data_iter for vhost_scsi_mapal() mapping to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * host scatterlists via get_user_pages_fast().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (t10_pi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (v_req_pi.pi_bytesout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (data_direction != DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 					vq_err(vq, "Received non-zero pi_bytesout,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) " but wrong data_direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) } else if (v_req_pi.pi_bytesin) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (data_direction != DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 					vq_err(vq, "Received non-zero pi_bytesin,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) " but wrong data_direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Set prot_iter to data_iter and truncate it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * prot_bytes, and advance data_iter past any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			 * preceding prot_bytes that may be present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * Also fix up the exp_data_len to reflect only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * actual data payload length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (prot_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) exp_data_len -= prot_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) prot_iter = data_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) iov_iter_truncate(&prot_iter, prot_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) iov_iter_advance(&data_iter, prot_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) tag = vhost64_to_cpu(vq, v_req_pi.tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) task_attr = v_req_pi.task_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) cdb = &v_req_pi.cdb[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) lun = vhost_buf_to_lun(v_req_pi.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) tag = vhost64_to_cpu(vq, v_req.tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) task_attr = v_req.task_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) cdb = &v_req.cdb[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) lun = vhost_buf_to_lun(v_req.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		 * Check that the received CDB size does not exceed our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * hardcoded max for vhost-scsi, then get a pre-allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * cmd descriptor for the new virtio-scsi tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * TODO what if cdb was too small for varlen cdb header?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) vq_err(vq, "Received SCSI CDB with command_size: %d that"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				" exceeds VHOST_SCSI_MAX_CDB_SIZE: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) exp_data_len + prot_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (IS_ERR(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) PTR_ERR(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) cmd->tvc_vhost = vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) cmd->tvc_vq = vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) cmd->tvc_resp_iov = vq->iov[vc.out];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) cmd->tvc_in_iovs = vc.in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) cmd->tvc_cdb[0], cmd->tvc_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (data_direction != DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) &prot_iter, exp_data_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) &data_iter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) vq_err(vq, "Failed to map iov to sgl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * Save the descriptor from vhost_get_vq_desc() to be used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * complete the virtio-scsi request in TCM callback context via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) cmd->tvc_vq_desc = vc.head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * Dispatch cmd descriptor for cmwq execution in process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * context provided by vhost_scsi_workqueue. This also ensures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * cmd is executed on the same kworker CPU as this vhost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * thread to gain positive L2 cache locality effects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) INIT_WORK(&cmd->work, vhost_scsi_submission_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) queue_work(vhost_scsi_workqueue, &cmd->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * ENXIO: No more requests, or read error, wait for next kick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * EINVAL: Invalid response buffer, drop the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * EIO: Respond with bad target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * EAGAIN: Pending request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (ret == -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) else if (ret == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
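/*
 * Copy a virtio_scsi_ctrl_tmf_resp with the given response code into the
 * guest-supplied buffer and complete the descriptor.
 */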
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int in_iovs, int vq_desc, struct iovec *resp_iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int tmf_resp_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct virtio_scsi_ctrl_tmf_resp rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct iov_iter iov_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) memset(&rsp, 0, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) rsp.response = tmf_resp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (likely(ret == sizeof(rsp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
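/*
 * Work function that translates the target core's TMF status into a virtio
 * response code, reports it to the guest, and releases the TMF descriptor.
 */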
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) vwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) int resp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) tmf->vq_desc, &tmf->resp_iov, resp_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) vhost_scsi_release_tmf_res(tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
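/*
 * Handle a virtio-scsi task management request.  Only LUN RESET is
 * supported; it is submitted to the target core using a preallocated
 * per-tpg TMF descriptor, and any other function is rejected.
 */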
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct vhost_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct virtio_scsi_ctrl_tmf_req *vtmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct vhost_scsi_ctx *vc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct vhost_scsi_virtqueue *svq = container_of(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct vhost_scsi_virtqueue, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct vhost_scsi_tmf *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (vhost32_to_cpu(vq, vtmf->subtype) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) goto send_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) goto send_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (list_empty(&tpg->tmf_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) goto send_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) list_del_init(&tmf->queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) tmf->tpg = tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) tmf->vhost = vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) tmf->svq = svq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) tmf->resp_iov = vq->iov[vc->out];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) tmf->vq_desc = vc->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) tmf->in_iovs = vc->in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) tmf->inflight = vhost_scsi_get_inflight(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) vhost_buf_to_lun(vtmf->lun), NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) TMR_LUN_RESET, GFP_KERNEL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) TARGET_SCF_ACK_KREF) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) vhost_scsi_release_tmf_res(tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) goto send_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) send_reject:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) VIRTIO_SCSI_S_FUNCTION_REJECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
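/*
 * Acknowledge an asynchronous notification request with VIRTIO_SCSI_S_OK
 * and a zeroed event_actual.
 */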
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) vhost_scsi_send_an_resp(struct vhost_scsi *vs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct vhost_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct vhost_scsi_ctx *vc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct virtio_scsi_ctrl_an_resp rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct iov_iter iov_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pr_debug("%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) rsp.response = VIRTIO_SCSI_S_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (likely(ret == sizeof(rsp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
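/*
 * Service the control virtqueue, which carries task management and
 * asynchronous notification requests.
 */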
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct vhost_scsi_tpg *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) __virtio32 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct virtio_scsi_ctrl_an_req an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct virtio_scsi_ctrl_tmf_req tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) } v_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct vhost_scsi_ctx vc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) size_t typ_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) int ret, c = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	 * We can handle the vq only after the endpoint is set up by calling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * VHOST_SCSI_SET_ENDPOINT ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (!vhost_vq_get_backend(vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) memset(&vc, 0, sizeof(vc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) vhost_disable_notify(&vs->dev, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ret = vhost_scsi_get_desc(vs, vq, &vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * Get the request type first in order to setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * other parameters dependent on the type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) vc.req = &v_req.type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) typ_size = sizeof(v_req.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (unlikely(!copy_from_iter_full(vc.req, typ_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) &vc.out_iter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) vq_err(vq, "Faulted on copy_from_iter tmf type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * The size of the response buffer depends on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * request type and must be validated against it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * Since the request type is not known, don't send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * a response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) switch (vhost32_to_cpu(vq, v_req.type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) case VIRTIO_SCSI_T_TMF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) vc.req = &v_req.tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) vc.lunp = &v_req.tmf.lun[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) vc.target = &v_req.tmf.lun[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) case VIRTIO_SCSI_T_AN_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) case VIRTIO_SCSI_T_AN_SUBSCRIBE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) vc.req = &v_req.an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) vc.lunp = &v_req.an.lun[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) vc.target = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) vq_err(vq, "Unknown control request %d", v_req.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * Validate the size of request and response buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * Check for a sane response buffer so we can report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * early errors back to the guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ret = vhost_scsi_chk_size(vq, &vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * Get the rest of the request now that its size is known.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) vc.req += typ_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) vc.req_size -= typ_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ret = vhost_scsi_get_req(vq, &vc, &tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (v_req.type == VIRTIO_SCSI_T_TMF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) vhost_scsi_send_an_resp(vs, vq, &vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * ENXIO: No more requests, or read error, wait for next kick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * EINVAL: Invalid response buffer, drop the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * EIO: Respond with bad target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * EAGAIN: Pending request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (ret == -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) else if (ret == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) poll.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) pr_debug("%s: The handling func for control queue.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) vhost_scsi_ctl_handle_vq(vs, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) vhost_scsi_send_evt(struct vhost_scsi *vs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct vhost_scsi_tpg *tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct se_lun *lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) u32 event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) u32 reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct vhost_scsi_evt *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) evt = vhost_scsi_allocate_evt(vs, event, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (!evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (tpg && lun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) /* TODO: share lun setup code with virtio-scsi.ko */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * Note: evt->event is zeroed when we allocate it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * lun[4-7] need to be zero according to virtio-scsi spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) evt->event.lun[0] = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) evt->event.lun[1] = tpg->tport_tpgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (lun->unpacked_lun >= 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) evt->event.lun[3] = lun->unpacked_lun & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) llist_add(&evt->list, &vs->vs_event_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) vhost_work_queue(&vs->dev, &vs->vs_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) poll.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (!vhost_vq_get_backend(vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (vs->vs_events_missed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static void vhost_scsi_handle_kick(struct vhost_work *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) poll.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) vhost_scsi_handle_vq(vs, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) vhost_poll_flush(&vs->vqs[index].vq.poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /* Callers must hold dev mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void vhost_scsi_flush(struct vhost_scsi *vs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /* Init new inflight and remember the old inflight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) vhost_scsi_init_inflight(vs, old_inflight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * The inflight->kref was initialized to 1. We decrement it here to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * indicate the start of the flush operation so that it will reach 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * when all the reqs are finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* Flush both the vhost poll and vhost work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) vhost_scsi_flush_vq(vs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) vhost_work_flush(&vs->dev, &vs->vs_completion_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) vhost_work_flush(&vs->dev, &vs->vs_event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* Wait for all reqs issued before the flush to be finished */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) wait_for_completion(&old_inflight[i]->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
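/*
 * Free the command descriptors, tag bitmap, and per-command arrays
 * allocated by vhost_scsi_setup_vq_cmds().
 */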
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct vhost_scsi_virtqueue *svq = container_of(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) struct vhost_scsi_virtqueue, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) struct vhost_scsi_cmd *tv_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (!svq->scsi_cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) for (i = 0; i < svq->max_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) tv_cmd = &svq->scsi_cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) kfree(tv_cmd->tvc_sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) kfree(tv_cmd->tvc_prot_sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) kfree(tv_cmd->tvc_upages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) sbitmap_free(&svq->scsi_tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) kfree(svq->scsi_cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) svq->scsi_cmds = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
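/*
 * Preallocate max_cmds command descriptors for this virtqueue, along with
 * their tag bitmap and the scatterlist and page arrays each command may use.
 */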
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct vhost_scsi_virtqueue *svq = container_of(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct vhost_scsi_virtqueue, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) struct vhost_scsi_cmd *tv_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (svq->scsi_cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) NUMA_NO_NODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) svq->max_cmds = max_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (!svq->scsi_cmds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) sbitmap_free(&svq->scsi_tags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) for (i = 0; i < max_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) tv_cmd = &svq->scsi_cmds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (!tv_cmd->tvc_sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) sizeof(struct page *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (!tv_cmd->tvc_upages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) pr_err("Unable to allocate tv_cmd->tvc_upages\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (!tv_cmd->tvc_prot_sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) vhost_scsi_destroy_vq_cmds(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * Called from vhost_scsi_ioctl() context to walk the list of available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) * vhost_scsi_tpg with an active struct vhost_scsi_nexus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) * The lock nesting rule is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) vhost_scsi_set_endpoint(struct vhost_scsi *vs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) struct vhost_scsi_target *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct se_portal_group *se_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct vhost_scsi_tport *tv_tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) struct vhost_scsi_tpg *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) struct vhost_scsi_tpg **vs_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct vhost_virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) int index, ret, i, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) bool match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) mutex_lock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) mutex_lock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	/* Verify that the ring has been set up correctly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) for (index = 0; index < vs->dev.nvqs; ++index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) vs_tpg = kzalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (!vs_tpg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (vs->vs_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) memcpy(vs_tpg, vs->vs_tpg, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (!tpg->tpg_nexus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (tpg->tv_tpg_vhost_count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) tv_tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) goto undepend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * In order to ensure individual vhost-scsi configfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * groups cannot be removed while in use by vhost ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * go ahead and take an explicit se_tpg->tpg_group.cg_item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * dependency now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) se_tpg = &tpg->se_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) ret = target_depend_item(&se_tpg->tpg_group.cg_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) pr_warn("target_depend_item() failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) goto undepend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) tpg->tv_tpg_vhost_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) tpg->vhost_scsi = vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) vs_tpg[tpg->tport_tpgt] = tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) match = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) sizeof(vs->vs_vhost_wwpn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) vq = &vs->vqs[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (!vhost_vq_is_setup(vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) goto destroy_vq_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) vq = &vs->vqs[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) vhost_vq_set_backend(vq, vs_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) vhost_vq_init_access(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * Act as synchronize_rcu to make sure access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * old vs->vs_tpg is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) vhost_scsi_flush(vs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) kfree(vs->vs_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) vs->vs_tpg = vs_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) destroy_vq_cmds:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (!vhost_vq_get_backend(&vs->vqs[i].vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) undepend:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) tpg = vs_tpg[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (tpg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) tpg->tv_tpg_vhost_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) kfree(vs_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
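/*
 * Called from vhost_scsi_ioctl() context to undo vhost_scsi_set_endpoint():
 * drop the vhost reference on each matching tpg, release the configfs
 * dependency taken at set_endpoint time, clear the vq backends, and free
 * the per-vq command pools and the vs_tpg array.
 */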
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct vhost_scsi_target *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct se_portal_group *se_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct vhost_scsi_tport *tv_tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct vhost_scsi_tpg *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) struct vhost_virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) bool match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) int index, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) u8 target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) mutex_lock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) mutex_lock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) /* Verify that ring has been setup correctly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) for (index = 0; index < vs->dev.nvqs; ++index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (!vs->vs_tpg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) goto err_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) target = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) tpg = vs->vs_tpg[target];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (!tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) tv_tport = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (!tv_tport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) goto err_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) tv_tport->tport_name, tpg->tport_tpgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) t->vhost_wwpn, t->vhost_tpgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) goto err_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) tpg->tv_tpg_vhost_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) tpg->vhost_scsi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) vs->vs_tpg[target] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) match = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * Release se_tpg->tpg_group.cg_item configfs dependency now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) se_tpg = &tpg->se_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) target_undepend_item(&se_tpg->tpg_group.cg_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) vq = &vs->vqs[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) vhost_vq_set_backend(vq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * Make sure cmds are not running before tearing them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) vhost_scsi_flush(vs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) vhost_scsi_destroy_vq_cmds(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * Act as synchronize_rcu to make sure access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * old vs->vs_tpg is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) vhost_scsi_flush(vs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) kfree(vs->vs_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) vs->vs_tpg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) WARN_ON(vs->vs_events_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) err_tpg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) err_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
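/*
 * Reject feature bits outside VHOST_SCSI_FEATURES, require valid log
 * memory when VHOST_F_LOG_ALL is requested, then publish the acked
 * feature set to every virtqueue under vq->mutex.
 */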
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct vhost_virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (features & ~VHOST_SCSI_FEATURES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) mutex_lock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if ((features & (1 << VHOST_F_LOG_ALL)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) !vhost_log_access_ok(&vs->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) vq = &vs->vqs[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) vq->acked_features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
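/*
 * Allocate and initialize a vhost_scsi instance for a newly opened
 * /dev/vhost-scsi file: set up the completion and event work items,
 * wire up the control/event/IO kick handlers, and register the
 * virtqueues with the vhost core.
 */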
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static int vhost_scsi_open(struct inode *inode, struct file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct vhost_scsi *vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) struct vhost_virtqueue **vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) int r = -ENOMEM, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (!vs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) vs = vzalloc(sizeof(*vs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (!vs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) goto err_vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (!vqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto err_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) vs->vs_events_nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) vs->vs_events_missed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) vqs[i] = &vs->vqs[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) VHOST_SCSI_WEIGHT, 0, true, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) vhost_scsi_init_inflight(vs, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) f->private_data = vs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) err_vqs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) kvfree(vs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) err_vs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
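/*
 * Tear down a vhost_scsi instance on the final close: clear the active
 * endpoint (if any), stop and clean up the vhost device, flush any
 * re-queued work, and free the virtqueue array and the instance itself.
 */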
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static int vhost_scsi_release(struct inode *inode, struct file *f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct vhost_scsi *vs = f->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) struct vhost_scsi_target t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) mutex_lock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) vhost_scsi_clear_endpoint(vs, &t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) vhost_dev_stop(&vs->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) vhost_dev_cleanup(&vs->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) vhost_scsi_flush(vs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) kfree(vs->dev.vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) kvfree(vs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
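/*
 * Main ioctl dispatcher for /dev/vhost-scsi: handles the vhost-scsi
 * specific endpoint/event ioctls and feature negotiation, and falls
 * back to the generic vhost device/vring ioctls for everything else.
 *
 * A minimal userspace sketch of the endpoint ioctls, assuming the char
 * device node is /dev/vhost-scsi and that a matching target WWPN has
 * already been created via configfs (the WWPN string below is only an
 * example, and vring/memory-table setup is omitted):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vhost.h>
 *
 *	int fd = open("/dev/vhost-scsi", O_RDWR);
 *	struct vhost_scsi_target backend;
 *
 *	memset(&backend, 0, sizeof(backend));
 *	strncpy(backend.vhost_wwpn, "naa.5001405005051234",
 *		sizeof(backend.vhost_wwpn) - 1);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &backend);
 *	...
 *	ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &backend);
 */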
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) vhost_scsi_ioctl(struct file *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) unsigned int ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct vhost_scsi *vs = f->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct vhost_scsi_target backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) u64 __user *featurep = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) u32 __user *eventsp = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) u32 events_missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) u64 features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) int r, abi_version = VHOST_SCSI_ABI_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) switch (ioctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) case VHOST_SCSI_SET_ENDPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (copy_from_user(&backend, argp, sizeof backend))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (backend.reserved != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) return vhost_scsi_set_endpoint(vs, &backend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) case VHOST_SCSI_CLEAR_ENDPOINT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (copy_from_user(&backend, argp, sizeof backend))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (backend.reserved != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) return vhost_scsi_clear_endpoint(vs, &backend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) case VHOST_SCSI_GET_ABI_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (copy_to_user(argp, &abi_version, sizeof abi_version))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) case VHOST_SCSI_SET_EVENTS_MISSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (get_user(events_missed, eventsp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) vs->vs_events_missed = events_missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) case VHOST_SCSI_GET_EVENTS_MISSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) events_missed = vs->vs_events_missed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (put_user(events_missed, eventsp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) case VHOST_GET_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) features = VHOST_SCSI_FEATURES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (copy_to_user(featurep, &features, sizeof features))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) case VHOST_SET_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (copy_from_user(&features, featurep, sizeof features))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) return vhost_scsi_set_features(vs, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) mutex_lock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /* TODO: flush backend after dev ioctl. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) if (r == -ENOIOCTLCMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) static const struct file_operations vhost_scsi_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) .release = vhost_scsi_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) .unlocked_ioctl = vhost_scsi_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) .compat_ioctl = compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) .open = vhost_scsi_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) .llseek = noop_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static struct miscdevice vhost_scsi_misc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) MISC_DYNAMIC_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) "vhost-scsi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) &vhost_scsi_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) static int __init vhost_scsi_register(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return misc_register(&vhost_scsi_misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static void vhost_scsi_deregister(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) misc_deregister(&vhost_scsi_misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) switch (tport->tport_proto_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) case SCSI_PROTOCOL_SAS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return "SAS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) case SCSI_PROTOCOL_FCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return "FCP";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) case SCSI_PROTOCOL_ISCSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) return "iSCSI";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return "Unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
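/*
 * Notify the guest of a LUN hotplug or hotunplug by queueing a
 * VIRTIO_SCSI_T_TRANSPORT_RESET event (RESCAN on plug, REMOVED on
 * unplug) on the event virtqueue, but only if the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG.
 */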
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct se_lun *lun, bool plug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct vhost_scsi *vs = tpg->vhost_scsi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) struct vhost_virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) u32 reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (!vs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) mutex_lock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (plug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) mutex_lock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) vhost_scsi_send_evt(vs, tpg, lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) mutex_unlock(&vq->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) mutex_unlock(&vs->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) vhost_scsi_do_plug(tpg, lun, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) vhost_scsi_do_plug(tpg, lun, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
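/*
 * Called by the target core when a LUN is linked into this TPG via
 * configfs: preallocate a TMF context for the port, bump the port
 * count, and send a hotplug event to any active vhost-scsi instance.
 */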
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) struct se_lun *lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) struct vhost_scsi_tmf *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (!tmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) INIT_LIST_HEAD(&tmf->queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) mutex_lock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) tpg->tv_tpg_port_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) vhost_scsi_hotplug(tpg, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
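/*
 * Reverse of vhost_scsi_port_link(): drop the port count, free one of
 * the preallocated TMF contexts, and send a hotunplug event to the
 * guest.
 */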
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) struct se_lun *lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct vhost_scsi_tmf *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) mutex_lock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) tpg->tv_tpg_port_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) list_del(&tmf->queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) kfree(tmf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) vhost_scsi_hotunplug(tpg, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct config_item *item, const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) int ret = kstrtoul(page, 0, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (val != 0 && val != 1 && val != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) tpg->tv_fabric_prot_type = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct config_item *item, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct se_portal_group *se_tpg = attrib_to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) &vhost_scsi_tpg_attrib_attr_fabric_prot_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
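/*
 * Create the single I_T nexus (se_session) for a TPG, using the passed
 * configfs name as the SCSI initiator port name.  Fails with -EEXIST
 * if a nexus is already active.
 */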
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct vhost_scsi_nexus *tv_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (tpg->tpg_nexus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) pr_debug("tpg->tpg_nexus already exists\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (!tv_nexus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) pr_err("Unable to allocate struct vhost_scsi_nexus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	 * Since we are running in 'demo mode' this call will generate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * struct se_node_acl for the vhost_scsi struct se_portal_group with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * the SCSI Initiator port name of the passed configfs group 'name'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) (unsigned char *)name, tv_nexus, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (IS_ERR(tv_nexus->tvn_se_sess)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) kfree(tv_nexus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) tpg->tpg_nexus = tv_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
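/*
 * Tear down the TPG's I_T nexus.  Refused while the TPG still has
 * active configfs ports or is still in use by a vhost-scsi instance.
 */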
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct se_session *se_sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct vhost_scsi_nexus *tv_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) tv_nexus = tpg->tpg_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (!tv_nexus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) se_sess = tv_nexus->tvn_se_sess;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (!se_sess) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (tpg->tv_tpg_port_count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) pr_err("Unable to remove TCM_vhost I_T Nexus with"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) " active TPG port count: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) tpg->tv_tpg_port_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (tpg->tv_tpg_vhost_count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) pr_err("Unable to remove TCM_vhost I_T Nexus with"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) " active TPG vhost count: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) tpg->tv_tpg_vhost_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * Release the SCSI I_T Nexus to the emulated vhost Target Port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) target_remove_session(se_sess);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) tpg->tpg_nexus = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) kfree(tv_nexus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct se_portal_group *se_tpg = to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct vhost_scsi_nexus *tv_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) mutex_lock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) tv_nexus = tpg->tpg_nexus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if (!tv_nexus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) ret = snprintf(page, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) mutex_unlock(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) const char *page, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) struct se_portal_group *se_tpg = to_tpg(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) struct vhost_scsi_tport *tport_wwn = tpg->tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	 * Shut down the active I_T nexus if 'NULL' is passed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (!strncmp(page, "NULL", 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) ret = vhost_scsi_drop_nexus(tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return (!ret) ? count : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * Otherwise make sure the passed virtual Initiator port WWN matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * the fabric protocol_id set in vhost_scsi_make_tport(), and call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * vhost_scsi_make_nexus().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (strlen(page) >= VHOST_SCSI_NAMELEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		pr_err("Emulated NAA SAS Address: %s, exceeds"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) " max: %d\n", page, VHOST_SCSI_NAMELEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ptr = strstr(i_port, "naa.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) pr_err("Passed SAS Initiator Port %s does not"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) " match target port protoid: %s\n", i_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) vhost_scsi_dump_proto_id(tport_wwn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) port_ptr = &i_port[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) goto check_newline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) ptr = strstr(i_port, "fc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) pr_err("Passed FCP Initiator Port %s does not"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) " match target port protoid: %s\n", i_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) vhost_scsi_dump_proto_id(tport_wwn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) port_ptr = &i_port[3]; /* Skip over "fc." */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) goto check_newline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) ptr = strstr(i_port, "iqn.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) pr_err("Passed iSCSI Initiator Port %s does not"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) " match target port protoid: %s\n", i_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) vhost_scsi_dump_proto_id(tport_wwn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) port_ptr = &i_port[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) goto check_newline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) pr_err("Unable to locate prefix for emulated Initiator Port:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) " %s\n", i_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * Clear any trailing newline for the NAA WWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) check_newline:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (i_port[strlen(i_port)-1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) i_port[strlen(i_port)-1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) ret = vhost_scsi_make_nexus(tpg, port_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) &vhost_scsi_tpg_attr_nexus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
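/*
 * configfs callback for creating a tpgt_$TPGT group under a vhost-scsi
 * WWPN directory: parse and validate the TPG tag, register the se_tpg
 * with the target core, and add it to the global vhost_scsi_list.
 */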
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) static struct se_portal_group *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) struct vhost_scsi_tport *tport = container_of(wwn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) struct vhost_scsi_tport, tport_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) struct vhost_scsi_tpg *tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) u16 tpgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (strstr(name, "tpgt_") != name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (!tpg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) pr_err("Unable to allocate struct vhost_scsi_tpg");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) mutex_init(&tpg->tv_tpg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) INIT_LIST_HEAD(&tpg->tv_tpg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) INIT_LIST_HEAD(&tpg->tmf_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) tpg->tport = tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) tpg->tport_tpgt = tpgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) kfree(tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) mutex_lock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return &tpg->se_tpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) struct vhost_scsi_tpg *tpg = container_of(se_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) struct vhost_scsi_tpg, se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) mutex_lock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) list_del(&tpg->tv_tpg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) mutex_unlock(&vhost_scsi_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * Release the virtual I_T Nexus for this vhost TPG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) vhost_scsi_drop_nexus(tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 * Deregister the se_tpg from TCM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) core_tpg_deregister(se_tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) kfree(tpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
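/*
 * configfs callback for creating a vhost-scsi WWPN directory: derive
 * the emulated protocol (SAS, FCP or iSCSI) from the "naa.", "fc." or
 * "iqn." prefix of the directory name and record the target port name.
 */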
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static struct se_wwn *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) vhost_scsi_make_tport(struct target_fabric_configfs *tf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) struct config_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) struct vhost_scsi_tport *tport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) u64 wwpn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) int off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return ERR_PTR(-EINVAL); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) tport = kzalloc(sizeof(*tport), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (!tport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) pr_err("Unable to allocate struct vhost_scsi_tport");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) tport->tport_wwpn = wwpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) * Determine the emulated Protocol Identifier and Target Port Name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * based on the incoming configfs directory name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ptr = strstr(name, "naa.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) tport->tport_proto_id = SCSI_PROTOCOL_SAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) goto check_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) ptr = strstr(name, "fc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) tport->tport_proto_id = SCSI_PROTOCOL_FCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) off = 3; /* Skip over "fc." */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) goto check_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) ptr = strstr(name, "iqn.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) goto check_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) pr_err("Unable to locate prefix for emulated Target Port: %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) kfree(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) check_len:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (strlen(name) >= VHOST_SCSI_NAMELEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) pr_err("Emulated %s Address: %s, exceeds max: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) vhost_scsi_dump_proto_id(tport), name, VHOST_SCSI_NAMELEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) kfree(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target %s Address: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) vhost_scsi_dump_proto_id(tport), name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) return &tport->tport_wwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
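/*
 * fabric_drop_wwn callback: inverse of vhost_scsi_make_tport(), called
 * when the WWN directory is removed.  Frees the vhost_scsi_tport
 * allocated above.
 */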
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) static void vhost_scsi_drop_tport(struct se_wwn *wwn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct vhost_scsi_tport *tport = container_of(wwn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) struct vhost_scsi_tport, tport_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target %s Address: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) vhost_scsi_dump_proto_id(tport), tport->tport_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) kfree(tport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
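/*
 * Read-only "version" configfs attribute, wired up through
 * vhost_scsi_wwn_attrs[] below; reports the fabric module version and
 * the running kernel.
 */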
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) vhost_scsi_wwn_version_show(struct config_item *item, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) return sprintf(page, "TCM_VHOST fabric module %s on %s/%s on "UTS_RELEASE"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) VHOST_SCSI_VERSION, utsname()->sysname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) utsname()->machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) &vhost_scsi_wwn_attr_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
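/*
 * Fabric template registered with TCM in vhost_scsi_init().  The
 * check_true/check_false helpers hard-code demo-mode behaviour for the
 * "vhost" fabric: any initiator may log in, demo-mode ACLs are cached,
 * and write protection is disabled.
 */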
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) static const struct target_core_fabric_ops vhost_scsi_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) .fabric_name = "vhost",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) .max_data_sg_nents = VHOST_SCSI_PREALLOC_SGLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) .tpg_get_wwn = vhost_scsi_get_fabric_wwn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) .tpg_get_tag = vhost_scsi_get_tpgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) .tpg_check_demo_mode = vhost_scsi_check_true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) .tpg_check_demo_mode_cache = vhost_scsi_check_true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) .release_cmd = vhost_scsi_release_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) .check_stop_free = vhost_scsi_check_stop_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) .sess_get_index = vhost_scsi_sess_get_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) .sess_get_initiator_sid = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) .write_pending = vhost_scsi_write_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) .set_default_node_attributes = vhost_scsi_set_default_node_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) .get_cmd_state = vhost_scsi_get_cmd_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) .queue_data_in = vhost_scsi_queue_data_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) .queue_status = vhost_scsi_queue_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) .queue_tm_rsp = vhost_scsi_queue_tm_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) .aborted_task = vhost_scsi_aborted_task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * Setup callers for generic logic in target_core_fabric_configfs.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) .fabric_make_wwn = vhost_scsi_make_tport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) .fabric_drop_wwn = vhost_scsi_drop_tport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) .fabric_make_tpg = vhost_scsi_make_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) .fabric_drop_tpg = vhost_scsi_drop_tpg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) .fabric_post_link = vhost_scsi_port_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) .fabric_pre_unlink = vhost_scsi_port_unlink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
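/*
 * Module init: bring up the private workqueue, register the vhost-scsi
 * misc character device (vhost_scsi_register), and finally register the
 * TCM fabric template.  Failures unwind in reverse order through the
 * goto labels below.
 */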
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static int __init vhost_scsi_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) pr_debug("TCM_VHOST fabric module %s on %s/%s on "UTS_RELEASE"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) VHOST_SCSI_VERSION, utsname()->sysname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) utsname()->machine);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * Use our own dedicated workqueue for submitting I/O into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * target core to avoid contention within system_wq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (!vhost_scsi_workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) ret = vhost_scsi_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) goto out_destroy_workqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) ret = target_register_template(&vhost_scsi_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) goto out_vhost_scsi_deregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) out_vhost_scsi_deregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) vhost_scsi_deregister();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) out_destroy_workqueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) destroy_workqueue(vhost_scsi_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
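/*
 * Module exit: tear down in the reverse order of vhost_scsi_init():
 * fabric template, vhost-scsi device, then the private workqueue.
 */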
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) static void vhost_scsi_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) target_unregister_template(&vhost_scsi_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) vhost_scsi_deregister();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) destroy_workqueue(vhost_scsi_workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) MODULE_ALIAS("tcm_vhost");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) module_init(vhost_scsi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) module_exit(vhost_scsi_exit);