// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 * Based upon conversations with large numbers
 * of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * The size of the integrity metadata is usually small; one inline sg
 * entry should cover the normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define SCSI_INLINE_PROT_SG_CNT  0
#define SCSI_INLINE_SG_CNT  0
#else
#define SCSI_INLINE_PROT_SG_CNT  1
#define SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

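/* Select the DMA-capable or the regular sense-buffer cache. */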
static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
        return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
                                   unsigned char *sense_buffer)
{
        kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
                        sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
                                              gfp_t gfp_mask, int numa_node)
{
        return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
                                     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
        struct kmem_cache *cache;
        int ret = 0;

        mutex_lock(&scsi_sense_cache_mutex);
        cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
        if (cache)
                goto exit;

        if (shost->unchecked_isa_dma) {
                scsi_sense_isadma_cache =
                        kmem_cache_create("scsi_sense_cache(DMA)",
                                SCSI_SENSE_BUFFERSIZE, 0,
                                SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
                if (!scsi_sense_isadma_cache)
                        ret = -ENOMEM;
        } else {
                scsi_sense_cache =
                        kmem_cache_create_usercopy("scsi_sense_cache",
                                SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
                                0, SCSI_SENSE_BUFFERSIZE, NULL);
                if (!scsi_sense_cache)
                        ret = -ENOMEM;
        }
exit:
        mutex_unlock(&scsi_sense_cache_mutex);
        return ret;
}

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * match the behaviour of the previous unplug mechanism; experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY        3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken. The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                atomic_set(&host->host_blocked, host->max_host_blocked);
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
        case SCSI_MLQUEUE_EH_RETRY:
                atomic_set(&device->device_blocked,
                           device->max_device_blocked);
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                atomic_set(&starget->target_blocked,
                           starget->max_target_blocked);
                break;
        }
}

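/*
 * Unprepare @cmd (which must have been prepared, i.e. RQF_DONTPREP set)
 * and hand its request back to blk-mq so it gets prepared and issued again.
 */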
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
        if (cmd->request->rq_flags & RQF_DONTPREP) {
                cmd->request->rq_flags &= ~RQF_DONTPREP;
                scsi_mq_uninit_cmd(cmd);
        } else {
                WARN_ON_ONCE(true);
        }
        blk_mq_requeue_request(cmd->request, true);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion. The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion. This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
        struct scsi_device *device = cmd->device;

        SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
                "Inserting command %p into mlqueue\n", cmd));

        scsi_set_blocked(cmd, reason);

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        if (unbusy)
                scsi_device_unbusy(device, cmd);

        /*
         * Requeue this command. It will go before all other commands
         * that are already in the queue. Schedule requeue work under
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
        cmd->result = 0;

        blk_mq_requeue_request(cmd->request, true);
}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd: command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases. Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        __scsi_queue_insert(cmd, reason, true);
}

/**
 * __scsi_execute - insert request and wait for the result
 * @sdev: scsi device
 * @cmd: scsi command
 * @data_direction: data direction
 * @buffer: data buffer
 * @bufflen: length of the buffer
 * @sense: optional sense buffer
 * @sshdr: optional decoded sense header
 * @timeout: request timeout in seconds
 * @retries: number of times to retry request
 * @flags: flags for ->cmd_flags
 * @rq_flags: flags for ->rq_flags
 * @resid: optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                   int data_direction, void *buffer, unsigned bufflen,
                   unsigned char *sense, struct scsi_sense_hdr *sshdr,
                   int timeout, int retries, u64 flags, req_flags_t rq_flags,
                   int *resid)
{
        struct request *req;
        struct scsi_request *rq;
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue,
                        data_direction == DMA_TO_DEVICE ?
                        REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
                        rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
        if (IS_ERR(req))
                return ret;
        rq = scsi_req(req);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                       buffer, bufflen, GFP_NOIO))
                goto out;

        rq->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(rq->cmd, cmd, rq->cmd_len);
        rq->retries = retries;
        req->timeout = timeout;
        req->cmd_flags |= flags;
        req->rq_flags |= rq_flags | RQF_QUIET;

        /*
         * head injection *required* here, otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        /*
         * Some devices (USB mass-storage in particular) may transfer
         * garbage data together with a residue indicating that the data
         * is invalid. Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
        if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
                memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

        if (resid)
                *resid = rq->resid_len;
        if (sense && rq->sense_len)
                memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
        if (sshdr)
                scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
        ret = rq->result;
out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(__scsi_execute);

/*
 * Wake up the error handler if necessary. To avoid missing a wakeup when
 * the number of host in-flight requests equals shost->host_failed,
 * scsi_eh_scmd_add() uses call_rcu() in combination with the RCU read
 * lock held in this function, so that this function in its entirety
 * either finishes before scsi_eh_scmd_add() increases the host_failed
 * counter, or notices the shost state change made by scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
        unsigned long flags;

        rcu_read_lock();
        __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
                        scsi_eh_wakeup(shost);
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
}

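/*
 * Called when @cmd has completed on @sdev: clear the command's in-flight
 * state, drop the target and device busy counts, and let the error
 * handler know in case the host is in recovery.
 */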
void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);

        scsi_dec_host_busy(shost, cmd);

        if (starget->can_queue > 0)
                atomic_dec(&starget->target_busy);

        atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
        blk_mq_run_hw_queues(q, false);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and run the queue for all the scsi_devices on the target - including
 * current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Run the queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        scsi_kick_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                                 same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                scsi_kick_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

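/* True if @sdev has reached its queue depth or is temporarily blocked. */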
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
        if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
                return true;
        if (atomic_read(&sdev->device_blocked) > 0)
                return true;
        return false;
}

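/* True if @starget limits queueing and is at its limit or blocked. */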
static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
        if (starget->can_queue > 0) {
                if (atomic_read(&starget->target_busy) >= starget->can_queue)
                        return true;
                if (atomic_read(&starget->target_blocked) > 0)
                        return true;
        }
        return false;
}

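/*
 * True if the host is temporarily blocked or has blocked itself via
 * scsi_block_requests().
 */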
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
        if (atomic_read(&shost->host_blocked) > 0)
                return true;
        if (shost->host_self_blocked)
                return true;
        return false;
}

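/*
 * Run the queues of the devices on @shost's starved list, stopping as
 * soon as the host itself becomes busy. Devices whose target is still
 * busy are put back on the list for a later pass.
 */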
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
        LIST_HEAD(starved_list);
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                struct request_queue *slq;

                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, run those queues. The queue runner
                 * drops the queue lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * The host lock must be taken before checking or
                 * modifying starved_list or starved_entry.
                 */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                /*
                 * Once we drop the host lock, a racing scsi_remove_device()
                 * call may remove the sdev from the starved list and destroy
                 * it and the queue. Mitigate by taking a reference to the
                 * queue and never touching the sdev again after we drop the
                 * host lock. Note: if __scsi_remove_device() invokes
                 * blk_cleanup_queue() before the queue is run from this
                 * function then running the queue is a no-op, since
                 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
                 */
                slq = sdev->request_queue;
                if (!blk_get_queue(slq))
                        continue;
                spin_unlock_irqrestore(shost->host_lock, flags);

                scsi_kick_queue(slq);
                blk_put_queue(slq);

                spin_lock_irqsave(shost->host_lock, flags);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q: last request's queue
 *
 * The previous command was completely finished, start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;

        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);
        if (!list_empty(&sdev->host->starved_list))
                scsi_starved_list_run(sdev->host);

        blk_mq_run_hw_queues(q, false);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
        struct scsi_device *sdev;
        struct request_queue *q;

        sdev = container_of(work, struct scsi_device, requeue_work);
        q = sdev->request_queue;
        scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
        if (!blk_rq_is_passthrough(cmd->request)) {
                struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

                if (drv->uninit_command)
                        drv->uninit_command(cmd);
        }
}

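/* Release the (possibly chained) data and integrity scatterlists of @cmd. */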
void scsi_free_sgtables(struct scsi_cmnd *cmd)
{
        if (cmd->sdb.table.nents)
                sg_free_table_chained(&cmd->sdb.table,
                                      SCSI_INLINE_SG_CNT);
        if (scsi_prot_sg_count(cmd))
                sg_free_table_chained(&cmd->prot_sdb->table,
                                      SCSI_INLINE_PROT_SG_CNT);
}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
        scsi_free_sgtables(cmd);
        scsi_uninit_cmd(cmd);
}

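/*
 * Kick the queue(s) after a command completion without blocking the
 * completing context: defer to the requeue work item when other LUNs or
 * starved devices may need service; otherwise rerun this device's queue,
 * but only if another context recorded budget contention in ->restarts.
 */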
static void scsi_run_queue_async(struct scsi_device *sdev)
{
        if (scsi_target(sdev)->single_lun ||
            !list_empty(&sdev->host->starved_list)) {
                kblockd_schedule_work(&sdev->requeue_work);
        } else {
                /*
                 * The smp_mb() in sbitmap_queue_clear(), or the one implied
                 * in .end_io, orders the write to .device_busy in
                 * scsi_device_unbusy() against the read of sdev->restarts
                 * below.
                 */
                int old = atomic_read(&sdev->restarts);

                /*
                 * ->restarts has to be kept as non-zero if a new budget
                 * contention occurs.
                 *
                 * No need to run the queue when either another re-run
                 * queue wins in updating ->restarts or a new budget
                 * contention occurs.
                 */
                if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
                        blk_mq_run_hw_queues(sdev->request_queue, true);
        }
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
                             unsigned int bytes)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        if (blk_update_request(req, error, bytes))
                return true;

        if (blk_queue_add_random(q))
                add_disk_randomness(req->rq_disk);

        if (!blk_rq_is_scsi(req)) {
                WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
                cmd->flags &= ~SCMD_INITIALIZED;
        }

        /*
         * Calling rcu_barrier() is not necessary here because the
         * SCSI error handler guarantees that the function called by
         * call_rcu() has been called before scsi_end_request() is
         * called.
         */
        destroy_rcu_head(&cmd->rcu);

        /*
         * In the MQ case the command gets freed by __blk_mq_end_request,
         * so we have to do all cleanup that depends on it earlier.
         *
         * We also can't kick the queues from irq context, so we
         * will have to defer it to a workqueue.
         */
        scsi_mq_uninit_cmd(cmd);

        /*
         * The queue is still alive, so grab a reference to prevent it
         * from being cleaned up while it is being run.
         */
        percpu_ref_get(&q->q_usage_counter);

        __blk_mq_end_request(req, error);

        scsi_run_queue_async(sdev);

        percpu_ref_put(&q->q_usage_counter);
        return false;
}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd: SCSI command
 * @result: scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value. May reset the host
 * byte of @cmd->result.
 */
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
        switch (host_byte(result)) {
        case DID_OK:
                /*
                 * Also check the other bytes than the status byte in result
                 * to handle the case when a SCSI LLD sets result to
                 * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
                 */
                if (scsi_status_is_good(result) && (result & ~0xff) == 0)
                        return BLK_STS_OK;
                return BLK_STS_IOERR;
        case DID_TRANSPORT_FAILFAST:
                return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_TARGET;
        case DID_NEXUS_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_NEXUS;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_NOSPC;
        case DID_MEDIUM_ERROR:
                set_host_byte(cmd, DID_OK);
                return BLK_STS_MEDIUM;
        default:
                return BLK_STS_IOERR;
        }
}

/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
                                      struct request_queue *q)
{
        /* A new command will be prepared and issued. */
        scsi_mq_requeue_cmd(cmd);
}

static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long wait_for;

        if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
                return false;

        wait_for = (cmd->allowed + 1) * req->timeout;
        if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
                scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
                            wait_for/HZ);
                return true;
        }
        return false;
}

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int level = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
        struct scsi_sense_hdr sshdr;
        bool sense_valid;
        bool sense_current = true;      /* false implies "deferred sense" */
        blk_status_t blk_stat;

        sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
        if (sense_valid)
                sense_current = !scsi_sense_is_deferred(&sshdr);

        blk_stat = scsi_result_to_blk_status(cmd, result);

        if (host_byte(result) == DID_RESET) {
                /* Third party bus reset or reset for error recovery
                 * reasons. Just retry the command and see what
                 * happens.
                 */
                action = ACTION_RETRY;
        } else if (sense_valid && sense_current) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* Detected disc change. Set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                action = ACTION_FAIL;
                        } else {
                                /* Must have been a power glitch, or a
                                 * bus reset. Could not have been a
                                 * media change, so we just retry the
                                 * command and see what happens.
                                 */
                                action = ACTION_RETRY;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /* If we had an ILLEGAL REQUEST returned, then
                         * we may have performed an unsupported
                         * command. The only thing this should be
                         * would be a ten byte read where only a six
                         * byte read was supported. Also, on a system
                         * where READ CAPACITY failed, we may have
                         * read past the end of the disk.
                         */
                        if ((cmd->device->use_10_for_rw &&
                             sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                /* This will issue a new 6-byte command. */
                                cmd->device->use_10_for_rw = 0;
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                action = ACTION_FAIL;
                                blk_stat = BLK_STS_PROTECTION;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
                                action = ACTION_FAIL;
                                blk_stat = BLK_STS_TARGET;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) /* DIF */
                                blk_stat = BLK_STS_PROTECTION;
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
                         * ready, or has a temporary blockage, retry.
                         */
                        if (sshdr.asc == 0x04) {
                                switch (sshdr.ascq) {
                                case 0x01: /* becoming ready */
                                case 0x04: /* format in progress */
                                case 0x05: /* rebuild in progress */
                                case 0x06: /* recalculation in progress */
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
                                case 0x11: /* notify (enable spinup) required */
                                case 0x14: /* space allocation in progress */
                                case 0x1a: /* start stop unit in progress */
                                case 0x1b: /* sanitize in progress */
                                case 0x1d: /* configuration in progress */
                                case 0x24: /* depopulation in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                default:
                                        action = ACTION_FAIL;
                                        break;
                                }
                        } else
                                action = ACTION_FAIL;
                        break;
                case VOLUME_OVERFLOW:
                        /* See SSC3rXX or current. */
                        action = ACTION_FAIL;
                        break;
                case DATA_PROTECT:
                        action = ACTION_FAIL;
                        if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) ||
                            (sshdr.asc == 0x55 &&
                             (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) {
                                /* Insufficient zone resources */
                                blk_stat = BLK_STS_ZONE_OPEN_RESOURCE;
                        }
                        break;
                default:
                        action = ACTION_FAIL;
                        break;
                }
        } else
                action = ACTION_FAIL;

        if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
                action = ACTION_FAIL;

        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
                if (!(req->rq_flags & RQF_QUIET)) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        DEFAULT_RATELIMIT_INTERVAL,
                                        DEFAULT_RATELIMIT_BURST);

                        if (unlikely(scsi_logging_level))
                                level =
                                     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                                    SCSI_LOG_MLCOMPLETE_BITS);

                        /*
                         * if logging is enabled the failure will be printed
                         * in scsi_log_completion(), so avoid duplicate messages
                         */
                        if (!level && __ratelimit(&_rs)) {
                                scsi_print_result(cmd, NULL, FAILED);
                                if (driver_byte(result) == DRIVER_SENSE)
                                        scsi_print_sense(cmd);
                                scsi_print_command(cmd);
                        }
                }
                if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
                        return;
                fallthrough;
        case ACTION_REPREP:
                scsi_io_completion_reprep(cmd, q);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) case ACTION_DELAYED_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* Retry the same command after a delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * new result that may suppress further error checking. Also modifies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * *blk_statp in some cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) blk_status_t *blk_statp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bool sense_valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) bool sense_current = true; /* false implies "deferred sense" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct request *req = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (sense_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) sense_current = !scsi_sense_is_deferred(&sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (blk_rq_is_passthrough(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (sense_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * SG_IO wants current and deferred errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
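			/*
			 * Fixed-format sense data: an 8-byte header plus the
			 * ADDITIONAL SENSE LENGTH field in byte 7, clamped
			 * to the size of the sense buffer.
			 */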
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) scsi_req(req)->sense_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) min(8 + cmd->sense_buffer[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (sense_current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) *blk_statp = scsi_result_to_blk_status(cmd, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) } else if (blk_rq_bytes(req) == 0 && sense_current) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /*
		 * Flush commands do not transfer any data, and thus cannot use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * good_bytes != blk_rq_bytes(req) as the signal for an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * This sets *blk_statp explicitly for the problem case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) *blk_statp = scsi_result_to_blk_status(cmd, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Recovered errors need reporting, but they're always treated as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * success, so fiddle the result code here. For passthrough requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * we already took a copy of the original into sreq->result which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * is what gets returned to the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) bool do_print = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * skip print since caller wants ATA registers. Only occurs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * on SCSI ATA PASS_THROUGH commands when CK_COND=1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) do_print = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) else if (req->rq_flags & RQF_QUIET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) do_print = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (do_print)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) scsi_print_sense(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* for passthrough, *blk_statp may be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) *blk_statp = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Another corner case: the SCSI status byte is non-zero but 'good'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * intermediate statuses (both obsolete in SAM-4) as good.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (status_byte(result) && scsi_status_is_good(result)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) *blk_statp = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * scsi_io_completion - Completion processing for SCSI commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * @cmd: command that is finished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * @good_bytes: number of processed bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * We will finish off the specified number of sectors. If we are done, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * command block will be released and the queue function will be goosed. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * are not done then we have to figure out what to do next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * a) We can call scsi_io_completion_reprep(). The request will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * unprepared and put back on the queue. Then a new command will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * be created for it. This should be used if we made forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * progress, or if we want to switch from READ(10) to READ(6) for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * example.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * b) We can call scsi_io_completion_action(). The request will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * put back on the queue and retried using the same command as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * before, possibly after a delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * c) We can call scsi_end_request() with blk_stat other than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * BLK_STS_OK, to fail the remainder of the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) int result = cmd->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct request_queue *q = cmd->device->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct request *req = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) blk_status_t blk_stat = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (unlikely(result)) /* a nz result may or may not be an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (unlikely(blk_rq_is_passthrough(req))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * scsi_result_to_blk_status may have reset the host_byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) scsi_req(req)->result = cmd->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * Next deal with any sectors which we were able to correctly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) "%u sectors total, %d bytes done.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) blk_rq_sectors(req), good_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * Failed, zero length commands always need to drop down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * to retry code. Fast path should return in this block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return; /* no bytes remaining */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /* Kill remainder if no retries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) WARN_ONCE(true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) "Bytes remaining after failed, no-retry command");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (likely(result == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) scsi_io_completion_reprep(cmd, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) scsi_io_completion_action(cmd, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
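/*
 * Some LLDs (e.g. libata for ATAPI devices) ask for a "drain" buffer to be
 * appended to non-write passthrough transfers so that the device can always
 * transfer a fixed amount of data; ->dma_need_drain() decides per request.
 */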
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) !op_is_write(req_op(rq)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) sdev->host->hostt->dma_need_drain(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * scsi_alloc_sgtables - allocate S/G tables for a command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * @cmd: command descriptor we wish to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * * BLK_STS_OK - on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * * BLK_STS_RESOURCE - if the failure is retryable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * * BLK_STS_IOERR - if the failure is fatal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct scsi_device *sdev = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct scatterlist *last_sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (WARN_ON_ONCE(!nr_segs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * Make sure there is space for the drain. The driver must adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * max_hw_segments to be prepared for this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (need_drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) nr_segs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * If sg table allocation fails, requeue request later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * Next, walk the list, and fill in the addresses and sizes of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * each segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
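		/*
		 * Round the mapping up to the queue's DMA padding boundary:
		 * e.g. dma_pad_mask == 3 and a 1022-byte request give
		 * pad_len == 2, padding the last segment to 1024 bytes.
		 */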
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) unsigned int pad_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) last_sg->length += pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) cmd->extra_len += pad_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (need_drain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) sg_unmark_end(last_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) last_sg = sg_next(last_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) sg_mark_end(last_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) cmd->extra_len += sdev->dma_drain_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) BUG_ON(count > cmd->sdb.table.nents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) cmd->sdb.table.nents = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) cmd->sdb.length = blk_rq_payload_bytes(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (blk_integrity_rq(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int ivecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (WARN_ON_ONCE(!prot_sdb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * This can happen if someone (e.g. multipath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * queues a command to a device on an adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * that does not support DIX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ret = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) goto out_free_sgtables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) prot_sdb->table.sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) SCSI_INLINE_PROT_SG_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ret = BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) goto out_free_sgtables;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) count = blk_rq_map_integrity_sg(rq->q, rq->bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) prot_sdb->table.sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) BUG_ON(count > ivecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) BUG_ON(count > queue_max_integrity_segments(rq->q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) cmd->prot_sdb = prot_sdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) cmd->prot_sdb->table.nents = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) out_free_sgtables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) scsi_free_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) EXPORT_SYMBOL(scsi_alloc_sgtables);
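
/*
 * Usage sketch (illustrative only, not a real driver): a ULD's
 * ->init_command() typically maps the request first and propagates the
 * status so that blk-mq can retry or fail the request:
 *
 *	static blk_status_t my_init_command(struct scsi_cmnd *cmd)
 *	{
 *		blk_status_t ret = scsi_alloc_sgtables(cmd);
 *
 *		if (ret != BLK_STS_OK)
 *			return ret;
 *		... build the CDB, set cmd->transfersize and cmd->allowed ...
 *		return BLK_STS_OK;
 *	}
 */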
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * scsi_initialize_rq - initialize struct scsi_cmnd partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * @rq: Request associated with the SCSI command to be initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * This function initializes the members of struct scsi_cmnd that must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * initialized before request processing starts and that won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * reinitialized if a SCSI command is requeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Called from inside blk_get_request() for pass-through requests and from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * inside scsi_init_command() for filesystem requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void scsi_initialize_rq(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) scsi_req_init(&cmd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) init_rcu_head(&cmd->rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) cmd->jiffies_at_alloc = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) cmd->retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
/*
 * Called only for requests that SCSI neither completed nor freed, to undo
 * any preparation that was done on them.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static void scsi_cleanup_rq(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (rq->rq_flags & RQF_DONTPREP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) rq->rq_flags &= ~RQF_DONTPREP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /* Called before a request is prepared. See also scsi_mq_prep_fn(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) void *buf = cmd->sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) void *prot = cmd->prot_sdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct request *rq = blk_mq_rq_from_pdu(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) unsigned long jiffies_at_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) int retries, to_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) bool in_flight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) flags |= SCMD_INITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) scsi_initialize_rq(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) jiffies_at_alloc = cmd->jiffies_at_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) retries = cmd->retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Zero out the cmd, except for the embedded scsi_request. Only clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * the driver-private command data if the LLD does not supply a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * function to initialize that data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) to_clear = sizeof(*cmd) - sizeof(cmd->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (!dev->host->hostt->init_cmd_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) to_clear += dev->host->hostt->cmd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) memset((char *)cmd + sizeof(cmd->req), 0, to_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cmd->device = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) cmd->sense_buffer = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) cmd->prot_sdb = prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) cmd->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) cmd->jiffies_at_alloc = jiffies_at_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) cmd->retries = retries;
	if (in_flight)
		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (req->bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) blk_status_t ret = scsi_alloc_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (unlikely(ret != BLK_STS_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) BUG_ON(blk_rq_bytes(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) memset(&cmd->sdb, 0, sizeof(cmd->sdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) cmd->cmd_len = scsi_req(req)->cmd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) cmd->cmnd = scsi_req(req)->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) cmd->transfersize = blk_rq_bytes(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) cmd->allowed = scsi_req(req)->retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static blk_status_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) scsi_device_state_check(struct scsi_device *sdev, struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) switch (sdev->sdev_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * If the device is offline we refuse to process any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * commands. The device must be brought online
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * before trying any recovery commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (!sdev->offline_already) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) sdev->offline_already = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) sdev_printk(KERN_ERR, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) "rejecting I/O to offline device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) case SDEV_DEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * If the device is fully deleted, we refuse to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * process any commands as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) sdev_printk(KERN_ERR, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) "rejecting I/O to dead device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) case SDEV_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) case SDEV_CREATED_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) case SDEV_QUIESCE:
		/*
		 * If the device is quiesced we only accept power management
		 * commands.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * For any other not fully online state we only allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * power management commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (req && !(req->rq_flags & RQF_PM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
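
/*
 * Quick reference for the mapping above:
 *
 *	SDEV_CREATED			-> BLK_STS_OK
 *	SDEV_OFFLINE/TRANSPORT_OFFLINE	-> BLK_STS_IOERR
 *	SDEV_DEL			-> BLK_STS_IOERR
 *	SDEV_BLOCK/CREATED_BLOCK	-> BLK_STS_RESOURCE
 *	SDEV_QUIESCE			-> BLK_STS_OK for PM requests,
 *					   else BLK_STS_RESOURCE
 *	any other state			-> BLK_STS_OK for PM requests,
 *					   else BLK_STS_IOERR
 */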
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * return 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * Called with the queue_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static inline int scsi_dev_queue_ready(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) unsigned int busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
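	/*
	 * Optimistically claim a queue slot: "busy" is the number of
	 * commands that were already outstanding before this one.
	 */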
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) busy = atomic_inc_return(&sdev->device_busy) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (atomic_read(&sdev->device_blocked)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) /*
		 * unblock after device_blocked counts down to zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (atomic_dec_return(&sdev->device_blocked) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) "unblocking device at zero depth\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (busy >= sdev->queue_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) out_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) atomic_dec(&sdev->device_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct scsi_target *starget = scsi_target(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) unsigned int busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (starget->single_lun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (starget->starget_sdev_user &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) starget->starget_sdev_user != sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) starget->starget_sdev_user = sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
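	/* A can_queue of 0 or less means the target imposes no queue limit. */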
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (starget->can_queue <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) busy = atomic_inc_return(&starget->target_busy) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (atomic_read(&starget->target_blocked) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) goto starved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /*
		 * unblock after target_blocked counts down to zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (atomic_dec_return(&starget->target_blocked) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) "unblocking target at zero depth\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (busy >= starget->can_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) goto starved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) starved:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) list_move_tail(&sdev->starved_entry, &shost->starved_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) out_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (starget->can_queue > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) atomic_dec(&starget->target_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * scsi_host_queue_ready: if we can send requests to shost, return 1 else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * return 0. We must end up running the queue again whenever 0 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * returned, else IO can hang.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static inline int scsi_host_queue_ready(struct request_queue *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (scsi_host_in_recovery(shost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (atomic_read(&shost->host_blocked) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (scsi_host_busy(shost) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) goto starved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /*
		 * unblock after host_blocked counts down to zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (atomic_dec_return(&shost->host_blocked) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) goto out_dec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) SCSI_LOG_MLQUEUE(3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) "unblocking host at zero depth\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (shost->host_self_blocked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) goto starved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* We're OK to process the command, so we can't be starved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (!list_empty(&sdev->starved_entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (!list_empty(&sdev->starved_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) list_del_init(&sdev->starved_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) __set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) starved:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (list_empty(&sdev->starved_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) list_add_tail(&sdev->starved_entry, &shost->starved_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) out_dec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) scsi_dec_host_busy(shost, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is only a hint and may
 * already be stale by the time a request stacking driver acts on it,
 * whether or not a lock was held.
 *
 * When SCSI can't dispatch I/Os anymore and needs to kill them, it must
 * return "not busy".  Otherwise, request stacking drivers may hold
 * requests forever.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static bool scsi_mq_lld_busy(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct scsi_device *sdev = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (blk_queue_dying(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
	/*
	 * Ignore host/starget busy state.
	 * Since the block layer does not have a concept of fairness across
	 * multiple queues, congestion of the host/starget needs to be
	 * handled at the SCSI layer.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static void scsi_softirq_done(struct request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int disposition;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) INIT_LIST_HEAD(&cmd->eh_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) atomic_inc(&cmd->device->iodone_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (cmd->result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) atomic_inc(&cmd->device->ioerr_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) disposition = scsi_decide_disposition(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) disposition = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) scsi_log_completion(cmd, disposition);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) switch (disposition) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) case SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) scsi_finish_command(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) case NEEDS_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) case ADD_TO_MLQUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) scsi_eh_scmd_add(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * scsi_dispatch_command - Dispatch a command to the low-level driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * @cmd: command block we are dispatching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct Scsi_Host *host = cmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) int rtn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) atomic_inc(&cmd->device->iorequest_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /* check if the device is still usable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /* in SDEV_DEL we error all commands. DID_NO_CONNECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * returns an immediate error upwards, and signals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * that the device is no longer present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Check to see if the scsi lld made this device blocked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (unlikely(scsi_device_blocked(cmd->device))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * in blocked state, the command is just put back on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * the device queue. The suspend state has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * blocked the queue so future requests should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * occur until the device transitions out of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * suspend state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) "queuecommand : device blocked\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return SCSI_MLQUEUE_DEVICE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
	/*
	 * Store the LUN in the CDB, if needed: old (SCSI-2 and earlier)
	 * targets expect the LUN in bits 5-7 of CDB byte 1.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (cmd->device->lun_in_cdb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) (cmd->device->lun << 5 & 0xe0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) scsi_log_send(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * Before we queue this command, check if the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * length exceeds what the host adapter can handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) "queuecommand : command too long. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) "cdb_size=%d host->max_cmd_len=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) cmd->cmd_len, cmd->device->host->max_cmd_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) cmd->result = (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) trace_scsi_dispatch_cmd_start(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) rtn = host->hostt->queuecommand(host, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (rtn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) trace_scsi_dispatch_cmd_error(cmd, rtn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) rtn != SCSI_MLQUEUE_TARGET_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) rtn = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) "queuecommand : request rejected\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return rtn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sizeof(struct scatterlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static blk_status_t scsi_prepare_cmd(struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct scsi_device *sdev = req->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) scsi_init_command(sdev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cmd->request = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) cmd->tag = req->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) cmd->prot_op = SCSI_PROT_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (blk_rq_bytes(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) cmd->sc_data_direction = rq_dma_dir(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) cmd->sc_data_direction = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
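	/*
	 * The inline scatterlist (see scsi_mq_inline_sgl_size()) lives in
	 * the request PDU, right behind struct scsi_cmnd and the LLD's
	 * private command data.
	 */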
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) cmd->sdb.table.sgl = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (scsi_host_get_prot(shost)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) cmd->prot_sdb->table.sgl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) (struct scatterlist *)(cmd->prot_sdb + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * Special handling for passthrough commands, which don't go to the ULP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * at all:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (blk_rq_is_scsi(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return scsi_setup_scsi_cmnd(sdev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (sdev->handler && sdev->handler->prep_fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) blk_status_t ret = sdev->handler->prep_fn(sdev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (ret != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) memset(cmd->cmnd, 0, BLK_MAX_CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return scsi_cmd_to_driver(cmd)->init_command(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
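/*
* Completion may race with the block layer's timeout handling (and with
* fault-injected "fake" timeouts).  SCMD_STATE_COMPLETE can only be set
* once, so whichever path loses the race returns early here instead of
* completing the request twice.
*/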
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static void scsi_mq_done(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (unlikely(blk_should_fake_timeout(cmd->request->q)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) trace_scsi_dispatch_cmd_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) blk_mq_complete_request(cmd->request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static void scsi_mq_put_budget(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct scsi_device *sdev = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) atomic_dec(&sdev->device_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static bool scsi_mq_get_budget(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct scsi_device *sdev = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (scsi_dev_queue_ready(q, sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) atomic_inc(&sdev->restarts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * .restarts must be incremented before .device_busy is read because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * code in scsi_run_queue_async() depends on the order of these operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * If all in-flight requests originated from this LUN are completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * before reading .device_busy, sdev->device_busy will be observed as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * soon. Otherwise, completion of one of these requests will observe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * the .restarts flag, and the request queue will be run for handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * this request, see scsi_end_request().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) !scsi_device_blocked(sdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) const struct blk_mq_queue_data *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct request *req = bd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct request_queue *q = req->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) struct scsi_device *sdev = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) int reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * If the device is not in running state we will reject some or all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) ret = scsi_device_state_check(sdev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (ret != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) goto out_put_budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) ret = BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (!scsi_target_queue_ready(shost, sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) goto out_put_budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (!scsi_host_queue_ready(q, shost, sdev, cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) goto out_dec_target_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (!(req->rq_flags & RQF_DONTPREP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) ret = scsi_prepare_cmd(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (ret != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) goto out_dec_host_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) req->rq_flags |= RQF_DONTPREP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) cmd->flags &= SCMD_PRESERVED_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (sdev->simple_tags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) cmd->flags |= SCMD_TAGGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (bd->last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) cmd->flags |= SCMD_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) scsi_set_resid(cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) cmd->scsi_done = scsi_mq_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) reason = scsi_dispatch_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (reason) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) scsi_set_blocked(cmd, reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) ret = BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) goto out_dec_host_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) out_dec_host_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) scsi_dec_host_busy(shost, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) out_dec_target_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (scsi_target(sdev)->can_queue > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) atomic_dec(&scsi_target(sdev)->target_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) out_put_budget:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) scsi_mq_put_budget(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case BLK_STS_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case BLK_STS_RESOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) case BLK_STS_ZONE_RESOURCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (scsi_device_blocked(sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ret = BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (unlikely(!scsi_device_online(sdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) scsi_req(req)->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) scsi_req(req)->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * Make sure to release all allocated resources when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * we hit an error, as we will never see this command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (req->rq_flags & RQF_DONTPREP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) scsi_mq_uninit_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) scsi_run_queue_async(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
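/*
* Reserved requests are internal to the LLD (e.g. commands the driver
* uses for its own housekeeping); the midlayer cannot sensibly run
* error handling for them, so their timer is simply restarted and the
* driver remains responsible for eventually completing them.
*/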
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static enum blk_eh_timer_return scsi_timeout(struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) bool reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return BLK_EH_RESET_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return scsi_times_out(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) unsigned int hctx_idx, unsigned int numa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct Scsi_Host *shost = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) const bool unchecked_isa_dma = shost->unchecked_isa_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (unchecked_isa_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) GFP_KERNEL, numa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (!cmd->sense_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) cmd->req.sense = cmd->sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (scsi_host_get_prot(shost)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) sg = (void *)cmd + sizeof(struct scsi_cmnd) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) shost->hostt->cmd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (shost->hostt->init_cmd_priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) ret = shost->hostt->init_cmd_priv(shost, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) scsi_free_sense_buffer(unchecked_isa_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) cmd->sense_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) unsigned int hctx_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct Scsi_Host *shost = set->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (shost->hostt->exit_cmd_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) shost->hostt->exit_cmd_priv(shost, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) cmd->sense_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static int scsi_map_queues(struct blk_mq_tag_set *set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (shost->hostt->map_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) return shost->hostt->map_queues(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) struct device *dev = shost->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * this limit is imposed by hardware restrictions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) SG_MAX_SEGMENTS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (scsi_host_prot_dma(shost)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) shost->sg_prot_tablesize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) min_not_zero(shost->sg_prot_tablesize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (dev->dma_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) shost->max_sectors = min_t(unsigned int, shost->max_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) dma_max_mapping_size(dev) >> SECTOR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) blk_queue_max_hw_sectors(q, shost->max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (shost->unchecked_isa_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) blk_queue_segment_boundary(q, shost->dma_boundary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) dma_set_seg_boundary(dev, shost->dma_boundary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) blk_queue_max_segment_size(q, shost->max_segment_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) blk_queue_virt_boundary(q, shost->virt_boundary_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) dma_set_max_seg_size(dev, queue_max_segment_size(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) * Set a reasonable default alignment: The larger of 32-byte (dword),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) * which is a common minimum for HBAs, and the minimum DMA alignment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) * which is set by the platform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * Devices that require a bigger alignment can increase it later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) EXPORT_SYMBOL_GPL(__scsi_init_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) static const struct blk_mq_ops scsi_mq_ops_no_commit = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) .get_budget = scsi_mq_get_budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) .put_budget = scsi_mq_put_budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) .queue_rq = scsi_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) .complete = scsi_softirq_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) .timeout = scsi_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) #ifdef CONFIG_BLK_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) .show_rq = scsi_show_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) .init_request = scsi_mq_init_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) .exit_request = scsi_mq_exit_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) .initialize_rq_fn = scsi_initialize_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) .cleanup_rq = scsi_cleanup_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) .busy = scsi_mq_lld_busy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) .map_queues = scsi_map_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct request_queue *q = hctx->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) struct scsi_device *sdev = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) shost->hostt->commit_rqs(shost, hctx->queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static const struct blk_mq_ops scsi_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) .get_budget = scsi_mq_get_budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) .put_budget = scsi_mq_put_budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) .queue_rq = scsi_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) .commit_rqs = scsi_commit_rqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) .complete = scsi_softirq_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) .timeout = scsi_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) #ifdef CONFIG_BLK_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) .show_rq = scsi_show_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) .init_request = scsi_mq_init_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) .exit_request = scsi_mq_exit_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) .initialize_rq_fn = scsi_initialize_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) .cleanup_rq = scsi_cleanup_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) .busy = scsi_mq_lld_busy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) .map_queues = scsi_map_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (IS_ERR(sdev->request_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) sdev->request_queue->queuedata = sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) __scsi_init_queue(sdev->host, sdev->request_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return sdev->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
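/*
* The blk-mq pdu allocated in front of each request is laid out as
* follows (sketch, matching the offset arithmetic in scsi_prepare_cmd()
* and scsi_mq_init_request()):
*
*	struct scsi_cmnd
*	LLD private data		(shost->hostt->cmd_size bytes)
*	inline data sgl			(sgl_size bytes)
*	struct scsi_data_buffer		(only when the host supports protection)
*	inline protection sgl		(SCSI_INLINE_PROT_SG_CNT entries)
*/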
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) int scsi_mq_setup_tags(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) unsigned int cmd_size, sgl_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct blk_mq_tag_set *tag_set = &shost->tag_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) scsi_mq_inline_sgl_size(shost));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (scsi_host_get_prot(shost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) cmd_size += sizeof(struct scsi_data_buffer) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) memset(tag_set, 0, sizeof(*tag_set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (shost->hostt->commit_rqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) tag_set->ops = &scsi_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) tag_set->ops = &scsi_mq_ops_no_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) tag_set->queue_depth = shost->can_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) tag_set->cmd_size = cmd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) tag_set->numa_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) tag_set->flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) tag_set->driver_data = shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (shost->host_tagset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return blk_mq_alloc_tag_set(tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) void scsi_mq_destroy_tags(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) blk_mq_free_tag_set(&shost->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * scsi_device_from_queue - return sdev associated with a request_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * @q: The request queue to return the sdev from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * Return the sdev associated with a request queue or NULL if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * request_queue does not reference a SCSI device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct scsi_device *scsi_device_from_queue(struct request_queue *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct scsi_device *sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) if (q->mq_ops == &scsi_mq_ops_no_commit ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) q->mq_ops == &scsi_mq_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) sdev = q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (!sdev || !get_device(&sdev->sdev_gendev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * scsi_block_requests - Utility function used by low-level drivers to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * further commands from being queued to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * @shost: host in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * There is no timer nor any other means by which the requests get unblocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) * other than the low-level driver calling scsi_unblock_requests().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) void scsi_block_requests(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) shost->host_self_blocked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) EXPORT_SYMBOL(scsi_block_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * scsi_unblock_requests - Utility function used by low-level drivers to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * further commands to be queued to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * @shost: host in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * There is no timer nor any other means by which the requests get unblocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * other than the low-level driver calling scsi_unblock_requests(). This is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * as an API function so that changes to the internals of the scsi mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * won't require wholesale changes to drivers that use this feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) void scsi_unblock_requests(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) shost->host_self_blocked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) scsi_run_host_queues(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) EXPORT_SYMBOL(scsi_unblock_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) void scsi_exit_queue(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) kmem_cache_destroy(scsi_sense_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) kmem_cache_destroy(scsi_sense_isadma_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * scsi_mode_select - issue a mode select
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * @sdev: SCSI device to be queried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * @pf: Page format bit (1 == standard, 0 == vendor specific)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * @sp: Save page bit (0 == don't save, 1 == save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * @modepage: mode page being requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * @buffer: request buffer (may not be smaller than eight bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * @len: length of request buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * @timeout: command timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * @retries: number of retries before failing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * @data: returns a structure abstracting the mode header data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * @sshdr: place to put sense data (or NULL if no sense to be collected).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * must be SCSI_SENSE_BUFFERSIZE big.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) * Returns zero if successful; negative error number or scsi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) * status on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) unsigned char *buffer, int len, int timeout, int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) unsigned char cmd[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) unsigned char *real_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) memset(cmd, 0, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (sdev->use_10_for_ms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (len > 65535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) real_buffer = kmalloc(8 + len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (!real_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) memcpy(real_buffer + 8, buffer, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) len += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) real_buffer[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) real_buffer[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) real_buffer[2] = data->medium_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) real_buffer[3] = data->device_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) real_buffer[4] = data->longlba ? 0x01 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) real_buffer[5] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) real_buffer[6] = data->block_descriptor_length >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) real_buffer[7] = data->block_descriptor_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) cmd[0] = MODE_SELECT_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) cmd[7] = len >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) cmd[8] = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (len > 255 || data->block_descriptor_length > 255 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) data->longlba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) real_buffer = kmalloc(4 + len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!real_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) memcpy(real_buffer + 4, buffer, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) len += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) real_buffer[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) real_buffer[1] = data->medium_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) real_buffer[2] = data->device_specific;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) real_buffer[3] = data->block_descriptor_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) cmd[0] = MODE_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) cmd[4] = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) sshdr, timeout, retries, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) kfree(real_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) EXPORT_SYMBOL_GPL(scsi_mode_select);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) * @sdev: SCSI device to be queried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) * @dbd: set if mode sense will allow block descriptors to be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) * @modepage: mode page being requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * @buffer: request buffer (may not be smaller than eight bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * @len: length of request buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * @timeout: command timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * @retries: number of retries before failing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * @data: returns a structure abstracting the mode header data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * @sshdr: place to put sense data (or NULL if no sense to be collected).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * must be SCSI_SENSE_BUFFERSIZE big.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * Returns zero if successful, or a negative error number on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) unsigned char *buffer, int len, int timeout, int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) unsigned char cmd[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) int use_10_for_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) int header_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) int result, retry_count = retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) struct scsi_sense_hdr my_sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) memset(data, 0, sizeof(*data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) memset(&cmd[0], 0, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) dbd = sdev->set_dbd_for_ms ? 8 : dbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) cmd[2] = modepage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* caller might not be interested in sense, but we need it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) if (!sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) sshdr = &my_sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) use_10_for_ms = sdev->use_10_for_ms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (use_10_for_ms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (len < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) cmd[0] = MODE_SENSE_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) cmd[8] = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) header_length = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (len < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) cmd[0] = MODE_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) cmd[4] = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) header_length = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) memset(buffer, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) sshdr, timeout, retries, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (result < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /* This code looks awful: what it's doing is making sure an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * ILLEGAL REQUEST sense return identifies the actual command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * byte as the problem. MODE_SENSE commands can return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * ILLEGAL REQUEST if the code page isn't supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (use_10_for_ms && !scsi_status_is_good(result) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) driver_byte(result) == DRIVER_SENSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (scsi_sense_valid(sshdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * Invalid command operation code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) sdev->use_10_for_ms = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (scsi_status_is_good(result)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) (modepage == 6 || modepage == 8))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /* Initio breakage? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) header_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) data->length = 13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) data->medium_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) data->device_specific = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) data->longlba = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) data->block_descriptor_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) } else if (use_10_for_ms) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) data->length = buffer[0]*256 + buffer[1] + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) data->medium_type = buffer[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) data->device_specific = buffer[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) data->longlba = buffer[4] & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) data->block_descriptor_length = buffer[6]*256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) + buffer[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) data->length = buffer[0] + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) data->medium_type = buffer[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) data->device_specific = buffer[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) data->block_descriptor_length = buffer[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) data->header_length = header_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) } else if ((status_byte(result) == CHECK_CONDITION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) scsi_sense_valid(sshdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) sshdr->sense_key == UNIT_ATTENTION && retry_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) retry_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) if (result > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) EXPORT_SYMBOL(scsi_mode_sense);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * scsi_test_unit_ready - test if unit is ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) * @sdev: scsi device to change the state of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * @timeout: command timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * @retries: number of retries before failing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * @sshdr: outpout pointer for decoded sense information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * Returns zero if unsuccessful or an error if TUR failed. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * removable media, UNIT_ATTENTION sets ->changed flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) char cmd[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) TEST_UNIT_READY, 0, 0, 0, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /* try to eat the UNIT_ATTENTION if there are enough retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) timeout, 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (sdev->removable && scsi_sense_valid(sshdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) sshdr->sense_key == UNIT_ATTENTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) sdev->changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) } while (scsi_sense_valid(sshdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) sshdr->sense_key == UNIT_ATTENTION && --retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) EXPORT_SYMBOL(scsi_test_unit_ready);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) * scsi_device_set_state - Take the given device through the device state model.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * @sdev: scsi device to change the state of.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * @state: state to change to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * Returns zero if successful or an error if the requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * transition is illegal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) enum scsi_device_state oldstate = sdev->sdev_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (state == oldstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) case SDEV_CREATED_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) case SDEV_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) case SDEV_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) case SDEV_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) case SDEV_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) case SDEV_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) case SDEV_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) case SDEV_CREATED_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) case SDEV_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) case SDEV_CREATED_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) case SDEV_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) case SDEV_QUIESCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) case SDEV_DEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) switch (oldstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) case SDEV_CREATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) case SDEV_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) case SDEV_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) case SDEV_CREATED_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) goto illegal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) sdev->offline_already = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) sdev->sdev_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) illegal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) SCSI_LOG_ERROR_RECOVERY(1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) sdev_printk(KERN_ERR, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) "Illegal state transition %s->%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) scsi_device_state_name(oldstate),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) scsi_device_state_name(state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) EXPORT_SYMBOL(scsi_device_set_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * sdev_evt_emit - emit a single SCSI device uevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * @sdev: associated SCSI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * @evt: event to emit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * Send a single uevent (scsi_event) to the associated scsi_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) int idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) char *envp[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) switch (evt->evt_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) case SDEV_EVT_MEDIA_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) envp[idx++] = "SDEV_MEDIA_CHANGE=1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) scsi_rescan_device(&sdev->sdev_gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) case SDEV_EVT_LUN_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) case SDEV_EVT_POWER_ON_RESET_OCCURRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) envp[idx++] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * scsi_evt_thread - send a uevent for each scsi event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * @work: work struct for scsi_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * Dispatch queued events to their associated scsi_device kobjects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) * as uevents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) void scsi_evt_thread(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) enum scsi_device_event evt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) LIST_HEAD(event_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) sdev = container_of(work, struct scsi_device, event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (test_and_clear_bit(evt_type, sdev->pending_events))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) struct scsi_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct list_head *this, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) spin_lock_irqsave(&sdev->list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) list_splice_init(&sdev->event_list, &event_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) spin_unlock_irqrestore(&sdev->list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (list_empty(&event_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) list_for_each_safe(this, tmp, &event_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) evt = list_entry(this, struct scsi_event, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) list_del(&evt->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) scsi_evt_emit(sdev, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) kfree(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * sdev_evt_send - send asserted event to uevent thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * @sdev: scsi_device event occurred on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * @evt: event to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) * Assert scsi device event asynchronously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /* FIXME: currently this check eliminates all media change events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * for polled devices. Need to update to discriminate between AN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * and polled events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (!test_bit(evt->evt_type, sdev->supported_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) kfree(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) spin_lock_irqsave(&sdev->list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) list_add_tail(&evt->node, &sdev->event_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) schedule_work(&sdev->event_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) spin_unlock_irqrestore(&sdev->list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) EXPORT_SYMBOL_GPL(sdev_evt_send);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * sdev_evt_alloc - allocate a new scsi event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * @evt_type: type of event to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * @gfpflags: GFP flags for allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * Allocates and returns a new scsi_event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) gfp_t gfpflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (!evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) evt->evt_type = evt_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) INIT_LIST_HEAD(&evt->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) /* evt_type-specific initialization, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) switch (evt_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) case SDEV_EVT_MEDIA_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) case SDEV_EVT_LUN_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) case SDEV_EVT_POWER_ON_RESET_OCCURRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) return evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) EXPORT_SYMBOL_GPL(sdev_evt_alloc);
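/*
 * Example (illustrative sketch): pairing sdev_evt_alloc() with
 * sdev_evt_send(). Separating allocation from submission lets a driver
 * allocate with GFP_KERNEL in process context and emit the event later
 * from a context that must not sleep; sdev_evt_send() consumes the event.
 * The helper name is hypothetical.
 *
 *	static int example_queue_media_change(struct scsi_device *sdev)
 *	{
 *		struct scsi_event *evt;
 *
 *		evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 *		if (!evt)
 *			return -ENOMEM;
 *		sdev_evt_send(sdev, evt);
 *		return 0;
 *	}
 */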
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * sdev_evt_send_simple - send asserted event to uevent thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * @sdev: scsi_device event occurred on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * @evt_type: type of event to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * @gfpflags: GFP flags for allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) * Assert scsi device event asynchronously, given an event type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) void sdev_evt_send_simple(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) enum scsi_device_event evt_type, gfp_t gfpflags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (!evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) evt_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) sdev_evt_send(sdev, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
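/*
 * Example (illustrative sketch): reporting a capacity change, e.g. from
 * unit attention handling. With GFP_ATOMIC this is safe in atomic context,
 * because the uevent itself is emitted later from the event work. The
 * helper name is hypothetical.
 *
 *	static void example_report_capacity_change(struct scsi_device *sdev)
 *	{
 *		sdev_evt_send_simple(sdev, SDEV_EVT_CAPACITY_CHANGE_REPORTED,
 *				     GFP_ATOMIC);
 *	}
 */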
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * scsi_device_quiesce - Block all commands except power management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * @sdev: scsi device to quiesce.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * This works by trying to transition to the SDEV_QUIESCE state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * (which must be a legal transition). When the device is in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * state, only power management requests will be accepted; all others will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) * be deferred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) * Must be called with user context, may sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * Returns zero if successful or an error code if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) scsi_device_quiesce(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct request_queue *q = sdev->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * It is allowed to call scsi_device_quiesce() multiple times from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) * the same context but concurrent scsi_device_quiesce() calls are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * not allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (sdev->quiesced_by == current)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) blk_set_pm_only(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) blk_mq_freeze_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * Ensure that the effect of blk_set_pm_only() will be visible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) * for percpu_ref_tryget() callers that occur after the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * unfreeze even if the queue was already frozen before this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * was called. See also https://lwn.net/Articles/573497/.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) blk_mq_unfreeze_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) mutex_lock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) err = scsi_device_set_state(sdev, SDEV_QUIESCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) sdev->quiesced_by = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) blk_clear_pm_only(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) mutex_unlock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) EXPORT_SYMBOL(scsi_device_quiesce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) * scsi_device_resume - Restart user issued commands to a quiesced device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) * @sdev: scsi device to resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) * Moves the device from quiesced back to running and restarts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) * Must be called with user context, may sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) void scsi_device_resume(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /* check if the device state was mutated prior to resume, and if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) * so assume the state is being managed elsewhere (for example
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) * device deleted during suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) mutex_lock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (sdev->sdev_state == SDEV_QUIESCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) scsi_device_set_state(sdev, SDEV_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (sdev->quiesced_by) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) sdev->quiesced_by = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) blk_clear_pm_only(sdev->request_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) mutex_unlock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) EXPORT_SYMBOL(scsi_device_resume);
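/*
 * Example (illustrative sketch): the usual quiesce/resume pairing around a
 * maintenance operation. Both calls may sleep and must come from the same
 * context, and scsi_device_resume() is called even when the intervening
 * step fails. Names other than the two exported helpers are hypothetical.
 *
 *	static int example_maintenance(struct scsi_device *sdev)
 *	{
 *		int ret;
 *
 *		ret = scsi_device_quiesce(sdev);
 *		if (ret)
 *			return ret;
 *		ret = example_do_maintenance(sdev); // hypothetical
 *		scsi_device_resume(sdev);
 *		return ret;
 *	}
 */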
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) device_quiesce_fn(struct scsi_device *sdev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) scsi_device_quiesce(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) scsi_target_quiesce(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) starget_for_each_device(starget, NULL, device_quiesce_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) EXPORT_SYMBOL(scsi_target_quiesce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) device_resume_fn(struct scsi_device *sdev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) scsi_device_resume(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) scsi_target_resume(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) starget_for_each_device(starget, NULL, device_resume_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) EXPORT_SYMBOL(scsi_target_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * @sdev: device to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) * Pause SCSI command processing on the specified device. Does not sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * Returns zero if successful or a negative error code upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) * Notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) * This routine transitions the device to the SDEV_BLOCK state (which must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) * a legal transition). When the device is in this state, command processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) * is paused until the device leaves the SDEV_BLOCK state. See also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) * scsi_internal_device_unblock_nowait().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) int scsi_internal_device_block_nowait(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) struct request_queue *q = sdev->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) err = scsi_device_set_state(sdev, SDEV_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * The device has transitioned to SDEV_BLOCK. Stop the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * block layer from calling the midlayer with this device's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) blk_mq_quiesce_queue_nowait(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
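/*
 * Example (illustrative sketch): blocking a single device without waiting
 * for in-flight dispatches, mirroring what scsi_host_block() below does for
 * each LUN: take the state mutex, attempt the transition, and leave any RCU
 * synchronization to the caller. The helper name is hypothetical.
 *
 *	static int example_block_one(struct scsi_device *sdev)
 *	{
 *		int ret;
 *
 *		mutex_lock(&sdev->state_mutex);
 *		ret = scsi_internal_device_block_nowait(sdev);
 *		mutex_unlock(&sdev->state_mutex);
 *		return ret;
 *	}
 */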
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) * @sdev: device to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * Pause SCSI command processing on the specified device and wait until all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * Returns zero if successful or a negative error code upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) * This routine transitions the device to the SDEV_BLOCK state (which must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) * a legal transition). When the device is in this state, command processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) * is paused until the device leaves the SDEV_BLOCK state. See also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) * scsi_internal_device_unblock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static int scsi_internal_device_block(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) struct request_queue *q = sdev->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) mutex_lock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) err = scsi_internal_device_block_nowait(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) blk_mq_quiesce_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) mutex_unlock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) void scsi_start_queue(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) struct request_queue *q = sdev->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) blk_mq_unquiesce_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) * scsi_internal_device_unblock_nowait - resume a device after a block request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * @sdev: device to resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * @new_state: state to set the device to after unblocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * Restart the device queue for a previously suspended SCSI device. Does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) * sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * Returns zero if successful or a negative error code upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * Notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) * This routine transitions the device to the SDEV_RUNNING state or to one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) * the offline states (which must be a legal transition) allowing the midlayer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) * to goose the queue for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) enum scsi_device_state new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) switch (new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) case SDEV_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) * Try to transition the scsi device to SDEV_RUNNING or one of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) * offlined states and goose the device queue if successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) switch (sdev->sdev_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) case SDEV_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) case SDEV_TRANSPORT_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) sdev->sdev_state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) case SDEV_CREATED_BLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (new_state == SDEV_TRANSPORT_OFFLINE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) new_state == SDEV_OFFLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) sdev->sdev_state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) sdev->sdev_state = SDEV_CREATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) case SDEV_CANCEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) case SDEV_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) scsi_start_queue(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
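/*
 * Example (illustrative sketch): undoing a block. @new_state chooses where
 * the device ends up: SDEV_RUNNING resumes I/O, while SDEV_TRANSPORT_OFFLINE
 * restarts the queue only so that queued requests can be failed. Taking
 * sdev->state_mutex mirrors scsi_internal_device_unblock(). The helper name
 * is hypothetical.
 *
 *	static void example_unblock_one(struct scsi_device *sdev, bool path_ok)
 *	{
 *		mutex_lock(&sdev->state_mutex);
 *		scsi_internal_device_unblock_nowait(sdev, path_ok ?
 *				SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
 *		mutex_unlock(&sdev->state_mutex);
 *	}
 */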
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * scsi_internal_device_unblock - resume a device after a block request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * @sdev: device to resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * @new_state: state to set the device to after unblocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * Restart the device queue for a previously suspended SCSI device. May sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) * Returns zero if successful or a negative error code upon failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * Notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * This routine transitions the device to the SDEV_RUNNING state or to one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * the offline states (which must be a legal transition) allowing the midlayer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * to goose the queue for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static int scsi_internal_device_unblock(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) enum scsi_device_state new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) mutex_lock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ret = scsi_internal_device_unblock_nowait(sdev, new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) mutex_unlock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) device_block(struct scsi_device *sdev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) ret = scsi_internal_device_block(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) dev_name(&sdev->sdev_gendev), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) target_block(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (scsi_is_target_device(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) starget_for_each_device(to_scsi_target(dev), NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) device_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) scsi_target_block(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if (scsi_is_target_device(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) starget_for_each_device(to_scsi_target(dev), NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) device_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) device_for_each_child(dev, NULL, target_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) EXPORT_SYMBOL_GPL(scsi_target_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) device_unblock(struct scsi_device *sdev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) target_unblock(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) if (scsi_is_target_device(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) starget_for_each_device(to_scsi_target(dev), data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) device_unblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (scsi_is_target_device(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) starget_for_each_device(to_scsi_target(dev), &new_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) device_unblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) device_for_each_child(dev, &new_state, target_unblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) EXPORT_SYMBOL_GPL(scsi_target_unblock);
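/*
 * Example (illustrative sketch): the pattern transport classes use around
 * temporary path loss: block the target when the link drops, then either
 * resume it or fail it over when the device-loss timer expires. Names other
 * than the two exported helpers are hypothetical.
 *
 *	static void example_link_down(struct scsi_target *starget)
 *	{
 *		scsi_target_block(&starget->dev);
 *	}
 *
 *	static void example_devloss_timeout(struct scsi_target *starget)
 *	{
 *		scsi_target_unblock(&starget->dev, SDEV_TRANSPORT_OFFLINE);
 *	}
 */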
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) scsi_host_block(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) * Call scsi_internal_device_block_nowait so we can avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) * calling synchronize_rcu() for each LUN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) shost_for_each_device(sdev, shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) mutex_lock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) ret = scsi_internal_device_block_nowait(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) mutex_unlock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) * calling synchronize_rcu() once is enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) EXPORT_SYMBOL_GPL(scsi_host_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) scsi_host_unblock(struct Scsi_Host *shost, int new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) shost_for_each_device(sdev, shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) ret = scsi_internal_device_unblock(sdev, new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) EXPORT_SYMBOL_GPL(scsi_host_unblock);
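/*
 * Example (illustrative sketch): quiescing every LUN on a host around a
 * controller-wide operation. On failure scsi_host_block() may leave some
 * devices blocked, so the unblock is issued unconditionally here. Names
 * other than the two exported helpers are hypothetical.
 *
 *	static int example_host_maintenance(struct Scsi_Host *shost)
 *	{
 *		int ret;
 *
 *		ret = scsi_host_block(shost);
 *		if (!ret)
 *			ret = example_do_host_maintenance(shost); // hypothetical
 *		scsi_host_unblock(shost, SDEV_RUNNING);
 *		return ret;
 *	}
 */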
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) * scsi_kmap_atomic_sg - find and atomically map an sg-element
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) * @sgl: scatter-gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) * @sg_count: number of segments in sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) * @offset: offset in bytes into sg, on return offset into the mapped area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * @len: bytes to map, on return number of bytes mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) * Returns virtual address of the start of the mapped page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) size_t *offset, size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) size_t sg_len = 0, len_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) WARN_ON(!irqs_disabled());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) for_each_sg(sgl, sg, sg_count, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) len_complete = sg_len; /* Complete sg-entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) sg_len += sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (sg_len > *offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) if (unlikely(i == sg_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) "elements %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) __func__, sg_len, *offset, sg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) /* Offset starting from the beginning of first page in this sg-entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) *offset = *offset - len_complete + sg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) /* Assumption: contiguous pages can be accessed as "page + i" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) *offset &= ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) /* Bytes in this sg-entry from *offset to the end of the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) sg_len = PAGE_SIZE - *offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) if (*len > sg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) *len = sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) EXPORT_SYMBOL(scsi_kmap_atomic_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) * @virt: virtual address to be unmapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) void scsi_kunmap_atomic_sg(void *virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) kunmap_atomic(virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
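/*
 * Example (illustrative sketch): copying a few bytes out of a command's
 * scatterlist. scsi_kmap_atomic_sg() warns unless interrupts are disabled,
 * and clamps *len to the end of the mapped page, so a real caller loops if
 * the copy may cross a page boundary. The helper name is hypothetical.
 *
 *	static void example_peek_sg(struct scsi_cmnd *cmd, void *buf,
 *				    size_t buf_len)
 *	{
 *		size_t offset = 0, len = buf_len;
 *		unsigned long flags;
 *		void *vaddr;
 *
 *		local_irq_save(flags);
 *		vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
 *					    scsi_sg_count(cmd),
 *					    &offset, &len);
 *		if (vaddr) {
 *			memcpy(buf, vaddr + offset, len);
 *			scsi_kunmap_atomic_sg(vaddr);
 *		}
 *		local_irq_restore(flags);
 *	}
 */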
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) void sdev_disable_disk_events(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) atomic_inc(&sdev->disk_events_disable_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) EXPORT_SYMBOL(sdev_disable_disk_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) void sdev_enable_disk_events(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) atomic_dec(&sdev->disk_events_disable_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) EXPORT_SYMBOL(sdev_enable_disk_events);
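/*
 * Example (illustrative sketch): suppressing disk-event (media change)
 * polling across an operation that would otherwise trigger spurious
 * events. The depth counter nests, so every disable must be balanced by
 * exactly one enable. Names other than the two exported helpers are
 * hypothetical.
 *
 *	static void example_quiet_operation(struct scsi_device *sdev)
 *	{
 *		sdev_disable_disk_events(sdev);
 *		example_do_operation(sdev); // hypothetical
 *		sdev_enable_disk_events(sdev);
 *	}
 */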
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) static unsigned char designator_prio(const unsigned char *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (d[1] & 0x30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) /* not associated with LUN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) if (d[3] == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) /* invalid length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) * Order of preference for lun descriptor:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) * - SCSI name string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) * - NAA IEEE Registered Extended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) * - EUI-64 based 16-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) * - EUI-64 based 12-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) * - NAA IEEE Registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) * - NAA IEEE Extended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) * - EUI-64 based 8-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) * - SCSI name string (truncated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) * - T10 Vendor ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) * as longer descriptors reduce the likelihood
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) * of identification clashes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) switch (d[1] & 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) /* SCSI name string, variable-length UTF-8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) return 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) switch (d[4] >> 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) /* NAA registered extended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) return 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) /* NAA registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) return 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) /* NAA extended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) /* NAA locally assigned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) switch (d[3]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) /* EUI64-based, 16 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) return 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* EUI64-based, 12 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) /* EUI64-based, 8 byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) /* T10 vendor ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) * scsi_vpd_lun_id - return a unique device identification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) * @sdev: SCSI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) * @id: buffer for the identification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) * @id_len: length of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * Copies a unique device identification into @id based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) * on the information in the VPD page 0x83 of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) * The string will be formatted as a SCSI name string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) * Returns the length of the identification or error on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) * If the identifier is longer than the supplied buffer the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) * identifier length is returned and the buffer is not zero-padded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) u8 cur_id_prio = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) u8 cur_id_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) const unsigned char *d, *cur_id_str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) const struct scsi_vpd *vpd_pg83;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) int id_size = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (!vpd_pg83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) /* The id string must be at least 20 bytes + terminating NULL byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) if (id_len < 21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) memset(id, 0, id_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) d = vpd_pg83->data + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) while (d < vpd_pg83->data + vpd_pg83->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) u8 prio = designator_prio(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) if (prio == 0 || cur_id_prio > prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) goto next_desig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) switch (d[1] & 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) case 0x1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) /* T10 Vendor ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (cur_id_size > d[3])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) cur_id_prio = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) cur_id_size = d[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) if (cur_id_size + 4 > id_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) cur_id_size = id_len - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) cur_id_str = d + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) id_size = snprintf(id, id_len, "t10.%*pE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) cur_id_size, cur_id_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) case 0x2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) /* EUI-64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) cur_id_prio = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) cur_id_size = d[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) cur_id_str = d + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) switch (cur_id_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) id_size = snprintf(id, id_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) "eui.%8phN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) cur_id_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) id_size = snprintf(id, id_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) "eui.%12phN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) cur_id_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) id_size = snprintf(id, id_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) "eui.%16phN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) cur_id_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) case 0x3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) /* NAA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) cur_id_prio = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) cur_id_size = d[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) cur_id_str = d + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) switch (cur_id_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) id_size = snprintf(id, id_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) "naa.%8phN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) cur_id_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) id_size = snprintf(id, id_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) "naa.%16phN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) cur_id_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) case 0x8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) /* SCSI name string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (cur_id_size > d[3])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) /* Prefer others for truncated descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) if (d[3] > id_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) prio = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (cur_id_prio > prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) cur_id_prio = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) cur_id_size = id_size = d[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) cur_id_str = d + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (cur_id_size >= id_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) cur_id_size = id_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) memcpy(id, cur_id_str, cur_id_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) next_desig:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) d += d[3] + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) return id_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) EXPORT_SYMBOL(scsi_vpd_lun_id);
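/*
 * Example (illustrative sketch): fetching a stable LUN identifier. The
 * buffer must hold at least 21 bytes or -EINVAL is returned; a larger
 * buffer avoids truncating long SCSI name strings. The helper name is
 * hypothetical.
 *
 *	static void example_log_lun_id(struct scsi_device *sdev)
 *	{
 *		char id[64];
 *
 *		if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
 *			sdev_printk(KERN_INFO, sdev, "lun id: %s\n", id);
 *	}
 */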
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) * scsi_vpd_tpg_id - return a target port group identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) * @sdev: SCSI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) * @rel_id: pointer to return the relative target port in (may be NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) * Returns the Target Port Group identifier from the information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) * in VPD page 0x83 of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) * Returns the identifier or error on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) const unsigned char *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) const struct scsi_vpd *vpd_pg83;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) int group_id = -EAGAIN, rel_port = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (!vpd_pg83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) d = vpd_pg83->data + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) while (d < vpd_pg83->data + vpd_pg83->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) switch (d[1] & 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) case 0x4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) /* Relative target port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) rel_port = get_unaligned_be16(&d[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) case 0x5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) /* Target port group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) group_id = get_unaligned_be16(&d[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) d += d[3] + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) if (group_id >= 0 && rel_id && rel_port != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) *rel_id = rel_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) return group_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) EXPORT_SYMBOL(scsi_vpd_tpg_id);
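/*
 * Example (illustrative sketch): reading the target port group, e.g. for
 * ALUA path handling. @rel_id is only written when a relative target port
 * designator is also present, hence the -1 sentinel. The helper name is
 * hypothetical.
 *
 *	static void example_log_tpg(struct scsi_device *sdev)
 *	{
 *		int group_id, rel_port = -1;
 *
 *		group_id = scsi_vpd_tpg_id(sdev, &rel_port);
 *		if (group_id >= 0)
 *			sdev_printk(KERN_INFO, sdev,
 *				    "tpg %d relative port %d\n",
 *				    group_id, rel_port);
 *	}
 */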