// SPDX-License-Identifier: GPL-2.0-only
/*
 *	sd.c Copyright (C) 1992 Drew Eckhardt
 *	Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *	Linux scsi disk driver
 *		Initial versions: Drew Eckhardt
 *		Subsequent revisions: Eric Youngdale
 *	Modification history:
 *	 - Drew Eckhardt <drew@colorado.edu> original
 *	 - Eric Youngdale <eric@andante.org> add scatter-gather, multiple
 *	   outstanding requests, and other enhancements.
 *	   Support loadable low-level scsi drivers.
 *	 - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
 *	   eight major numbers.
 *	 - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in
 *	   sd_init and cleanups.
 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
 *	   was not being read in sd_open. Fix problem where removable media
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox
 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>:
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.
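 *	(For reference: the logging level is a per-facility bit field whose
 *	layout is defined in scsi_logging.h; at run time it can typically be
 *	adjusted via the scsi_mod.scsi_logging_level module parameter.)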
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

#include "sd.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);

#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
#define SD_MINORS	16
#else
#define SD_MINORS	0
#endif
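/*
 * 16 minors per disk leaves room for the whole-disk node plus 15 partitions;
 * with CONFIG_DEBUG_BLOCK_EXT_DEVT, 0 forces all minors to be allocated
 * dynamically from the extended devt range instead.
 */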

static void sd_config_discard(struct scsi_disk *, unsigned int);
static void sd_config_write_same(struct scsi_disk *);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
static int sd_eh_action(struct scsi_cmnd *, int);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct device *cdev);

static DEFINE_IDA(sd_index_ida);

/* This mutex is used to mediate the 0->1 reference get in the
 * face of object destruction (i.e. we can't allow a get on an
 * object after last put) */
static DEFINE_MUTEX(sd_ref_mutex);

static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
static mempool_t *sd_page_pool;

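/*
 * The index into sd_cache_types[] encodes (WCE << 1) | RCD; see
 * cache_type_show() and cache_type_store() below.
 */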
static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};

static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
	bool wc = false, fua = false;

	if (sdkp->WCE) {
		wc = true;
		if (sdkp->DPOFUA)
			fua = true;
	}

	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}

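/*
 * cache_type_store() accepts one of the sd_cache_types[] strings and applies
 * it to the device with a MODE SELECT of the caching mode page.  Prefixing
 * the value with "temporary " (e.g. "temporary write back") only updates the
 * kernel's view of the WCE/RCD bits and skips the MODE SELECT, so nothing is
 * written to the device.
 */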
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there are probably so many exceptions
		 * that it's not worth the risk */
		return -EINVAL;

	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		    data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		      data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}

static ssize_t
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	return sprintf(buf, "%u\n", sdp->manage_start_stop);
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	bool v;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_start_stop = v;

	return count;
}
static DEVICE_ATTR_RW(manage_start_stop);

static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	bool v;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int ct = sdkp->RCD + 2*sdkp->WCE;

	return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);

static ssize_t
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);

static ssize_t
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->protection_type);
}

static ssize_t
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

	if (val <= T10_PI_TYPE3_PROTECTION)
		sdkp->protection_type = val;

	return count;
}
static DEVICE_ATTR_RW(protection_type);

static ssize_t
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);

static ssize_t
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);

static ssize_t
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);

/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);

/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sdkp->zeroing_mode = mode;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

static ssize_t
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}

static ssize_t
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
static DEVICE_ATTR_RW(max_medium_access_timeouts);

static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (sdkp->device->type == TYPE_ZBC)
		return sprintf(buf, "host-managed\n");
	if (sdkp->zoned == 1)
		return sprintf(buf, "host-aware\n");
	if (sdkp->zoned == 2)
		return sprintf(buf, "drive-managed\n");
	return sprintf(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
		sdkp->max_retries = retries;
		return count;
	}

	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
		    SD_MAX_RETRIES);
	return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);

static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};

static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume,
};

static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

/*
 * Dummy kobj_map->probe function.
 * The default ->probe function will call modprobe, which is
 * pointless as this module is already loaded.
 */
static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
{
	return NULL;
}

/*
 * Device number to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *            31        20       19  8 7  4 3  0
 *
 * Inside a major, we have 16k disks, but they are mapped non-
 * contiguously: the first 16 disks are for major0, the next 16
 * for major1, ... Disk 256 is for major0 again, disk 272 for
 * major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 128--135.
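 * For example, disks 0..15 use major 8 (SCSI_DISK0_MAJOR), disks 16..31
 * use major 65 (SCSI_DISK1_MAJOR), and so on; sd_major() below performs
 * this index-to-major translation.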
 */
static int sd_major(int major_idx)
{
	switch (major_idx) {
	case 0:
		return SCSI_DISK0_MAJOR;
	case 1 ... 7:
		return SCSI_DISK1_MAJOR + major_idx - 1;
	case 8 ... 15:
		return SCSI_DISK8_MAJOR + major_idx - 8;
	default:
		BUG();
		return 0;	/* shut up gcc */
	}
}

static struct scsi_disk *scsi_disk_get(struct gendisk *disk)
{
	struct scsi_disk *sdkp = NULL;

	mutex_lock(&sd_ref_mutex);

	if (disk->private_data) {
		sdkp = scsi_disk(disk);
		if (scsi_device_get(sdkp->device) == 0)
			get_device(&sdkp->dev);
		else
			sdkp = NULL;
	}
	mutex_unlock(&sd_ref_mutex);
	return sdkp;
}

static void scsi_disk_put(struct scsi_disk *sdkp)
{
	struct scsi_device *sdev = sdkp->device;

	mutex_lock(&sd_ref_mutex);
	put_device(&sdkp->dev);
	scsi_device_put(sdev);
	mutex_unlock(&sd_ref_mutex);
}

#ifdef CONFIG_BLK_SED_OPAL
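/*
 * Callback used by the SED Opal layer: wrap the Opal payload in a 12-byte
 * SECURITY PROTOCOL OUT/IN CDB (depending on @send) and issue it to the
 * device.
 */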
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
		RQF_PM, NULL);
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

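/*
 * Set up scmd->prot_flags for the chosen DIX/DIF combination and return the
 * protection value (RDPROTECT/WRPROTECT) that the read/write command setup
 * code merges into the CDB.
 */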
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct bio *bio = scmd->request->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
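/*
 * Configure the request queue discard limits for the chosen logical block
 * provisioning mode (UNMAP, WRITE SAME with or without the UNMAP bit, or
 * disabled) and remember the mode for sd_init_command().
 */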
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct request_queue *q = sdkp->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) unsigned int logical_block_size = sdkp->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) unsigned int max_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) q->limits.discard_alignment =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) sdkp->unmap_alignment * logical_block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) q->limits.discard_granularity =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) max(sdkp->physical_block_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) sdkp->unmap_granularity * logical_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) sdkp->provisioning_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) switch (mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) case SD_LBP_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) case SD_LBP_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) blk_queue_max_discard_sectors(q, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case SD_LBP_UNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) max_blocks = min_not_zero(sdkp->max_unmap_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) (u32)SD_MAX_WS16_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) case SD_LBP_WS16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (sdkp->device->unmap_limit_for_ws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) max_blocks = sdkp->max_unmap_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) max_blocks = sdkp->max_ws_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) case SD_LBP_WS10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (sdkp->device->unmap_limit_for_ws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) max_blocks = sdkp->max_unmap_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) max_blocks = sdkp->max_ws_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) case SD_LBP_ZERO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) max_blocks = min_not_zero(sdkp->max_ws_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) (u32)SD_MAX_WS10_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct scsi_device *sdp = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) unsigned int data_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!rq->special_vec.bv_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) clear_highpage(rq->special_vec.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) rq->special_vec.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rq->special_vec.bv_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) cmd->cmd_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) cmd->cmnd[0] = UNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) cmd->cmnd[8] = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
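	/*
	 * Build the UNMAP parameter list: an 8-byte header (UNMAP data
	 * length, block descriptor data length) followed by a single
	 * 16-byte block descriptor holding the starting LBA and the
	 * number of logical blocks to unmap.
	 */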
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) buf = page_address(rq->special_vec.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) put_unaligned_be16(6 + 16, &buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) put_unaligned_be16(16, &buf[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) put_unaligned_be64(lba, &buf[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) put_unaligned_be32(nr_blocks, &buf[16]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) cmd->allowed = sdkp->max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) cmd->transfersize = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) rq->timeout = SD_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return scsi_alloc_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
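/*
 * Set up a WRITE SAME(16) command. The single zeroed logical block set up
 * as the special payload is replicated by the target across nr_blocks
 * blocks; with the UNMAP bit set the target may deallocate the blocks
 * instead of writing them.
 */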
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) bool unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct scsi_device *sdp = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) u32 data_len = sdp->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (!rq->special_vec.bv_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) clear_highpage(rq->special_vec.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) rq->special_vec.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) rq->special_vec.bv_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) cmd->cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) cmd->cmnd[0] = WRITE_SAME_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) cmd->cmnd[1] = 0x8; /* UNMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) put_unaligned_be64(lba, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) cmd->allowed = sdkp->max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) cmd->transfersize = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return scsi_alloc_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
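/*
 * Set up a WRITE SAME(10) command. Same payload handling as the 16-byte
 * variant, but limited to a 32-bit LBA and a 16-bit block count.
 */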
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) bool unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct scsi_device *sdp = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) u32 data_len = sdp->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!rq->special_vec.bv_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) clear_highpage(rq->special_vec.bv_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) rq->special_vec.bv_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) rq->special_vec.bv_len = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) cmd->cmd_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) cmd->cmnd[0] = WRITE_SAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) cmd->cmnd[1] = 0x8; /* UNMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) put_unaligned_be32(lba, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cmd->allowed = sdkp->max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) cmd->transfersize = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return scsi_alloc_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
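/*
 * Translate a REQ_OP_WRITE_ZEROES request into the most suitable SCSI
 * command: WRITE SAME(16/10) with the UNMAP bit when the device reports
 * that unmapped blocks read back as zeroes (unless REQ_NOUNMAP is set),
 * otherwise a plain WRITE SAME sized according to the addressing limits.
 */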
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct scsi_device *sdp = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!(rq->cmd_flags & REQ_NOUNMAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) switch (sdkp->zeroing_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case SD_ZERO_WS16_UNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return sd_setup_write_same16_cmnd(cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) case SD_ZERO_WS10_UNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return sd_setup_write_same10_cmnd(cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (sdp->no_write_same) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rq->rq_flags |= RQF_QUIET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return BLK_STS_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return sd_setup_write_same16_cmnd(cmd, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return sd_setup_write_same10_cmnd(cmd, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
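/*
 * Derive the WRITE SAME transfer limit and the zeroing strategy for the
 * device and propagate both to the block layer queue limits.
 */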
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static void sd_config_write_same(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct request_queue *q = sdkp->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) unsigned int logical_block_size = sdkp->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (sdkp->device->no_write_same) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) sdkp->max_ws_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	/* Some devices cannot handle block counts above 0xffff despite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * supporting WRITE SAME(16). Consequently we default to 64k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * blocks per I/O unless the device explicitly advertises a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * bigger limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) (u32)SD_MAX_WS16_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) (u32)SD_MAX_WS10_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) sdkp->device->no_write_same = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) sdkp->max_ws_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (sdkp->lbprz && sdkp->lbpws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) else if (sdkp->lbprz && sdkp->lbpws10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) else if (sdkp->max_ws_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) sdkp->zeroing_mode = SD_ZERO_WS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sdkp->zeroing_mode = SD_ZERO_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (sdkp->max_ws_blocks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) sdkp->physical_block_size > logical_block_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * Reporting a maximum number of blocks that is not aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * on the device physical size would cause a large write same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * request to be split into physically unaligned chunks by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * even if the caller of these functions took care to align the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * large request. So make sure the maximum reported is aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * to the device physical block size. This is only an optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * optimization for regular disks, but this is mandatory to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * avoid failure of large write same requests directed at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * sequential write required zones of host-managed ZBC disks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sdkp->max_ws_blocks =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) round_down(sdkp->max_ws_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) bytes_to_logical(sdkp->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) sdkp->physical_block_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) (logical_block_size >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) (logical_block_size >> 9));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * sd_setup_write_same_cmnd - write the same data to multiple blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * @cmd: command to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * the preference indicated by the target device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct scsi_device *sdp = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) struct bio *bio = rq->bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (sdkp->device->no_write_same)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return BLK_STS_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) rq->timeout = SD_WRITE_SAME_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) cmd->cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) cmd->cmnd[0] = WRITE_SAME_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) put_unaligned_be64(lba, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) cmd->cmd_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) cmd->cmnd[0] = WRITE_SAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) put_unaligned_be32(lba, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) cmd->transfersize = sdp->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) cmd->allowed = sdkp->max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * For WRITE SAME the data transferred via the DATA OUT buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * different from the amount of data actually written to the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * We set up __data_len to the amount of data transferred via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * to transfer a single sector of data first, but then reset it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * the amount of data to be written right after so that the I/O path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * knows how much to actually write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) rq->__data_len = sdp->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) ret = scsi_alloc_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) rq->__data_len = blk_rq_bytes(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
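/*
 * Set up a SYNCHRONIZE CACHE(10) command. Leaving the LBA and block count
 * fields at zero asks the device to flush its entire write cache.
 */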
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* flush requests don't perform I/O, zero the S/G table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) memset(&cmd->sdb, 0, sizeof(cmd->sdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) cmd->cmnd[0] = SYNCHRONIZE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) cmd->cmd_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) cmd->transfersize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) cmd->allowed = sdkp->max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
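/*
 * READ(32)/WRITE(32) are variable-length CDBs used when T10 PI Type 2
 * protection is active, since only they carry the expected initial
 * reference tag. The extended CDB is allocated from sd_cdb_pool and
 * returned in sd_uninit_command().
 */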
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) sector_t lba, unsigned int nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) unsigned char flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (unlikely(cmd->cmnd == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) cmd->cmd_len = SD_EXT_CDB_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) memset(cmd->cmnd, 0, cmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) cmd->cmnd[7] = 0x18; /* Additional CDB len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) cmd->cmnd[9] = write ? WRITE_32 : READ_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) cmd->cmnd[10] = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) put_unaligned_be64(lba, &cmd->cmnd[12]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Initial Reference Tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) sector_t lba, unsigned int nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) unsigned char flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) cmd->cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cmd->cmnd[0] = write ? WRITE_16 : READ_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) cmd->cmnd[1] = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) cmd->cmnd[14] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) cmd->cmnd[15] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) put_unaligned_be64(lba, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) sector_t lba, unsigned int nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) unsigned char flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) cmd->cmd_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) cmd->cmnd[0] = write ? WRITE_10 : READ_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) cmd->cmnd[1] = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) cmd->cmnd[6] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) cmd->cmnd[9] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) put_unaligned_be32(lba, &cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
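/*
 * READ(6)/WRITE(6) only have a 21-bit LBA and an 8-bit transfer length
 * (where 0 means 256 blocks), so they are used only for small transfers
 * near the start of the disk on devices that rejected the 10-byte
 * variants.
 */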
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) sector_t lba, unsigned int nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned char flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	/* Avoid a transfer length of 0, which would be interpreted as 256 blocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (WARN_ON_ONCE(nr_blocks == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (unlikely(flags & 0x8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	 * This happens only if this drive failed a 10-byte READ/WRITE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	 * command with ILLEGAL_REQUEST during operation and thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	 * turned off use_10_for_rw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) cmd->cmd_len = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) cmd->cmnd[0] = write ? WRITE_6 : READ_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) cmd->cmnd[1] = (lba >> 16) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) cmd->cmnd[2] = (lba >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) cmd->cmnd[3] = lba & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) cmd->cmnd[4] = nr_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) cmd->cmnd[5] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
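/*
 * Set up a READ/WRITE/ZONE APPEND command: validate the request against
 * device state, capacity and alignment, apply the last-sector workaround,
 * configure protection information and choose the CDB variant matching
 * the LBA, transfer length and protection settings.
 */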
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct scsi_device *sdp = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) sector_t threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) unsigned int mask = logical_to_sectors(sdp, 1) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) bool write = rq_data_dir(rq) == WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) unsigned char protect, fua;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) blk_status_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) unsigned int dif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) bool dix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ret = scsi_alloc_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (ret != BLK_STS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) ret = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) if (!scsi_device_online(sdp) || sdp->changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * Some SD card readers can't handle accesses which touch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * last one or two logical blocks. Split accesses as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (lba < threshold) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* Access up to the threshold but not beyond */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) nr_blocks = threshold - lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /* Access only a single logical block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) nr_blocks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (req_op(rq) == REQ_OP_ZONE_APPEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dix = scsi_prot_sg_count(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (dif || dix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) protect = sd_setup_protect_cmnd(cmd, dix, dif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) protect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) protect | fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) protect | fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) sdp->use_10_for_rw || protect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) protect | fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) protect | fua);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (unlikely(ret != BLK_STS_OK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * We shouldn't disconnect in the middle of a sector, so with a dumb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * host adapter, it's safe to assume that we can at least transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * this many bytes between each connect / disconnect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) cmd->transfersize = sdp->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) cmd->underflow = nr_blocks << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) cmd->allowed = sdkp->max_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) cmd->sdb.length = nr_blocks * sdp->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) SCSI_LOG_HLQUEUE(1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) scmd_printk(KERN_INFO, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) "%s: block=%llu, count=%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) (unsigned long long)blk_rq_pos(rq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) blk_rq_sectors(rq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) SCSI_LOG_HLQUEUE(2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) scmd_printk(KERN_INFO, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) "%s %d/%u 512 byte blocks.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) write ? "writing" : "reading", nr_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) blk_rq_sectors(rq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * This indicates that the command is ready from our end to be queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) scsi_free_sgtables(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
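/*
 * Translate the block layer request operation into the matching SCSI
 * command setup routine.
 */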
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct request *rq = cmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) switch (req_op(rq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) case REQ_OP_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) case SD_LBP_UNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) return sd_setup_unmap_cmnd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) case SD_LBP_WS16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return sd_setup_write_same16_cmnd(cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case SD_LBP_WS10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return sd_setup_write_same10_cmnd(cmd, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) case SD_LBP_ZERO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return sd_setup_write_same10_cmnd(cmd, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return BLK_STS_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return sd_setup_write_zeroes_cmnd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) case REQ_OP_WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return sd_setup_write_same_cmnd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) case REQ_OP_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return sd_setup_flush_cmnd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) case REQ_OP_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) case REQ_OP_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) case REQ_OP_ZONE_APPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return sd_setup_read_write_cmnd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) case REQ_OP_ZONE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) case REQ_OP_ZONE_RESET_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) case REQ_OP_ZONE_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) case REQ_OP_ZONE_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) case REQ_OP_ZONE_FINISH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return BLK_STS_NOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
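/*
 * Undo sd_init_command(): release the special payload page used by
 * discard/write-same/write-zeroes requests and return an extended CDB to
 * sd_cdb_pool if one was allocated.
 */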
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static void sd_uninit_command(struct scsi_cmnd *SCpnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct request *rq = SCpnt->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) u8 *cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) mempool_free(rq->special_vec.bv_page, sd_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (SCpnt->cmnd != scsi_req(rq)->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) cmnd = SCpnt->cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) SCpnt->cmnd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) SCpnt->cmd_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) mempool_free(cmnd, sd_cdb_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
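/*
 * Decide whether sd_open() needs to revalidate the disk: either the media
 * may have changed on a removable or write-protected device, or userspace
 * requested a rescan via BLKRRPART.
 */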
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static bool sd_need_revalidate(struct block_device *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (sdkp->device->removable || sdkp->write_prot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (bdev_check_media_change(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * Force a full rescan after ioctl(BLKRRPART). While the disk state has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * nothing to do with partitions, BLKRRPART is used to force a full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * revalidate after things like a format for historical reasons.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * sd_open - open a scsi disk device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * @bdev: Block device of the scsi disk to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * @mode: FMODE_* mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * Returns 0 if successful. Returns a negated errno value in case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * Note: This can be called from a user context (e.g. fsck(1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * or from within the kernel (e.g. as a result of a mount(1)).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * Locking: called with bdev->bd_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static int sd_open(struct block_device *bdev, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) sdev = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * If the device is in error recovery, wait until it is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * If the device is offline, then disallow any access to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) retval = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (!scsi_block_when_processing_errors(sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (sd_need_revalidate(bdev, sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) sd_revalidate_disk(bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * If the drive is empty, just let the open fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) retval = -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 * If the device has the write protect tab set, have the open fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	 * if the user expects to be able to write to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) retval = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (sdkp->write_prot && (mode & FMODE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 * It is possible that the media change handling resulted in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * the device being taken offline. If this is the case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * report this to the user, and don't pretend that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * open actually succeeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) retval = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (!scsi_device_online(sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (scsi_block_when_processing_errors(sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) scsi_disk_put(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * sd_release - invoked when the (last) close(2) is called on this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * scsi disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) * @disk: disk to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * @mode: FMODE_* mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * Returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * Note: may block (uninterruptible) if error recovery is underway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * on this disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * Locking: called with bdev->bd_mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static void sd_release(struct gendisk *disk, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct scsi_disk *sdkp = scsi_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct scsi_device *sdev = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (scsi_block_when_processing_errors(sdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) scsi_disk_put(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
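/*
 * Report a legacy CHS geometry for HDIO_GETGEO: default to 64 heads and
 * 32 sectors per track unless the host template supplies its own
 * bios_param() conversion.
 */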
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct Scsi_Host *host = sdp->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) int diskinfo[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* default to most commonly used values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) diskinfo[0] = 0x40; /* 1 << 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) diskinfo[1] = 0x20; /* 1 << 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) diskinfo[2] = capacity >> 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /* override with calculated, extended default, or driver values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (host->hostt->bios_param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) scsicam_bios_param(bdev, capacity, diskinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) geo->heads = diskinfo[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) geo->sectors = diskinfo[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) geo->cylinders = diskinfo[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * sd_ioctl_common - process an ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * @bdev: target block device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * @mode: FMODE_* mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * @cmd: ioctl command number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * @p: this is third argument given to ioctl(2) system call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * Often contains a pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * Returns 0 if successful (some ioctls return positive numbers on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * success as well). Returns a negated errno value in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * Note: most ioctls are forwarded to the block subsystem or further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * down in the SCSI subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static int sd_ioctl_common(struct block_device *bdev, fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) unsigned int cmd, void __user *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) struct gendisk *disk = bdev->bd_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct scsi_disk *sdkp = scsi_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) "cmd=0x%x\n", disk->disk_name, cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) error = scsi_verify_blk_ioctl(bdev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * If we are in the middle of error recovery, don't let anyone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * else try and use this device. Also, if error recovery fails, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * may try and take the device offline, in which case all further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * access to the device is prohibited.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) (mode & FMODE_NDELAY) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (is_sed_ioctl(cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return sed_ioctl(sdkp->opal_dev, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * Send SCSI addressing ioctls directly to mid level, send other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * ioctls to block level and then onto mid level if they can't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * resolved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) case SCSI_IOCTL_GET_IDLUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) case SCSI_IOCTL_GET_BUS_NUMBER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) error = scsi_ioctl(sdp, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
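
/*
 * Hedged usage sketch (illustration only, not driver code): the addressing
 * ioctls that sd_ioctl_common() hands straight to scsi_ioctl() can be issued
 * from userspace on the block node. The header location and device path
 * below are assumptions; check the installed uapi headers for the exact
 * definitions.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/scsi_ioctl.h>	// assumed location of SCSI_IOCTL_*
 *
 *	int bus, fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);
 *
 *	if (fd >= 0 && ioctl(fd, SCSI_IOCTL_GET_BUS_NUMBER, &bus) == 0)
 *		printf("host number %d\n", bus);
 */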
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static void set_media_not_present(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (sdkp->media_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) sdkp->device->changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (sdkp->device->removable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) sdkp->media_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static int media_not_present(struct scsi_disk *sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (!scsi_sense_valid(sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /* not invoked for commands that could return deferred errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) switch (sshdr->sense_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) case UNIT_ATTENTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) case NOT_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* medium not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (sshdr->asc == 0x3A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) set_media_not_present(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
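
/*
 * For reference, the sense fields consulted above map onto fixed-format
 * sense data roughly as follows (descriptor-format sense carries the same
 * key/ASC/ASCQ triple in a different layout; scsi_normalize_sense() hides
 * the difference behind struct scsi_sense_hdr):
 *
 *	byte  2, bits 3:0	sense key	NOT_READY, UNIT_ATTENTION, ...
 *	byte 12			ASC		0x3A = MEDIUM NOT PRESENT
 *	byte 13			ASCQ		qualifier, not checked here
 */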
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * sd_check_events - check media events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * @disk: kernel device descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * @clearing: disk events currently being cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * Returns mask of DISK_EVENT_*.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) * Note: this function is invoked from the block subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct scsi_disk *sdkp = scsi_disk_get(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) struct scsi_device *sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (!sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * If the device is offline, don't send any commands - just pretend as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * if the command failed. If the device ever comes back online, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * can deal with it then. It is only because of unrecoverable errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * that we would ever take a device offline in the first place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (!scsi_device_online(sdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) set_media_not_present(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * Using TEST_UNIT_READY enables differentiation between a drive with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * no cartridge loaded - NOT READY, a drive with a changed cartridge -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * UNIT ATTENTION, and one with the same cartridge - GOOD STATUS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * Drives that auto spin down, e.g. the iomega jaz 1G, will be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * the disk is revalidated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (scsi_block_when_processing_errors(sdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) struct scsi_sense_hdr sshdr = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) /* failed to execute TUR, assume media not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (host_byte(retval)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) set_media_not_present(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (media_not_present(sdkp, &sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * For removable scsi disks we have to recognise the presence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) * of a disk in the drive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) if (!sdkp->media_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) sdp->changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) sdkp->media_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * sdp->changed is set under the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * Medium present state has changed in either direction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * Device has indicated UNIT_ATTENTION.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) sdp->changed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) scsi_disk_put(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int retries, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) const int timeout = sdp->request_queue->rq_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * SD_FLUSH_TIMEOUT_MULTIPLIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct scsi_sense_hdr my_sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (!scsi_device_online(sdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /* caller might not be interested in sense, but we need it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (!sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) sshdr = &my_sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) for (retries = 3; retries > 0; --retries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) unsigned char cmd[10] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) cmd[0] = SYNCHRONIZE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * Leave the rest of the command zero to indicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * flush everything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) timeout, sdkp->max_retries, 0, RQF_PM, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (res == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) if (driver_byte(res) == DRIVER_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) sd_print_sense_hdr(sdkp, sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) /* we need to evaluate the error return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (scsi_sense_valid(sshdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) (sshdr->asc == 0x3a || /* medium not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) sshdr->asc == 0x20 || /* invalid command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /* not an error here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) switch (host_byte(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) /* ignore errors due to racing a disconnection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) case DID_BAD_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) case DID_NO_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) /* signal the upper layer it might try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) case DID_BUS_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case DID_IMM_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) case DID_REQUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) case DID_SOFT_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
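
/*
 * Layout of the SYNCHRONIZE CACHE(10) CDB assembled above, as a sketch
 * (field meanings per SBC; only the opcode is filled in, everything else
 * stays zero):
 *
 *	byte 0		0x35 (SYNCHRONIZE_CACHE)
 *	byte 1		IMMED and related bits, 0 = wait for the flush
 *	bytes 2-5	starting LBA, 0
 *	bytes 7-8	number of blocks, 0 = flush the whole cache
 *	byte 9		CONTROL, 0
 */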
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) static void sd_rescan(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct scsi_disk *sdkp = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ret = sd_revalidate_disk(sdkp->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) revalidate_disk_size(sdkp->disk, ret == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) static int sd_ioctl(struct block_device *bdev, fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) void __user *p = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) ret = sd_ioctl_common(bdev, mode, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (ret != -ENOTTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) void __user *p = compat_ptr(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) ret = sd_ioctl_common(bdev, mode, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (ret != -ENOTTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) static char sd_pr_type(enum pr_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) case PR_WRITE_EXCLUSIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) return 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) case PR_EXCLUSIVE_ACCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return 0x03;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) case PR_WRITE_EXCLUSIVE_REG_ONLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return 0x05;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) case PR_EXCLUSIVE_ACCESS_REG_ONLY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return 0x06;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) case PR_WRITE_EXCLUSIVE_ALL_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) return 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) case PR_EXCLUSIVE_ACCESS_ALL_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static int sd_pr_command(struct block_device *bdev, u8 sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) u64 key, u64 sa_key, u8 type, u8 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) struct scsi_device *sdev = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) u8 cmd[16] = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) u8 data[24] = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) cmd[0] = PERSISTENT_RESERVE_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) cmd[1] = sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) cmd[2] = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) put_unaligned_be32(sizeof(data), &cmd[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) put_unaligned_be64(key, &data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) put_unaligned_be64(sa_key, &data[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) data[20] = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (driver_byte(result) == DRIVER_SENSE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) scsi_sense_valid(&sshdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) scsi_print_sense_hdr(sdev, NULL, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
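
/*
 * Sketch of the PERSISTENT RESERVE OUT command built above (field offsets
 * per SPC; the parameter data is the 24-byte block filled in just before
 * scsi_execute_req()):
 *
 *	CDB	byte 0		0x5F (PERSISTENT_RESERVE_OUT)
 *		byte 1		service action (sa)
 *		byte 2		scope (0 = LU_SCOPE) and type in the low nibble
 *		bytes 5-8	parameter list length, 24
 *
 *	data	bytes 0-7	reservation key
 *		bytes 8-15	service action reservation key
 *		byte 20		flags: bit 0 APTPL, bit 2 ALL_TG_PT, bit 3 SPEC_I_PT
 */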
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (flags & ~PR_FL_IGNORE_KEY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) old_key, new_key, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) (1 << 0) /* APTPL */);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) enum pr_type type, bool abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) sd_pr_type(type), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static int sd_pr_clear(struct block_device *bdev, u64 key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) static const struct pr_ops sd_pr_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) .pr_register = sd_pr_register,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) .pr_reserve = sd_pr_reserve,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) .pr_release = sd_pr_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) .pr_preempt = sd_pr_preempt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) .pr_clear = sd_pr_clear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) };
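
/*
 * These ops back the generic persistent reservation ioctls dispatched from
 * block/ioctl.c. A hedged userspace sketch, assuming the layout exported in
 * <linux/pr.h> (struct pr_registration and the IOC_PR_* numbers):
 *
 *	#include <linux/pr.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *
 *	struct pr_registration reg = { .old_key = 0, .new_key = 0x1234 };
 *
 *	if (ioctl(fd, IOC_PR_REGISTER, &reg))	// fd: open() on the whole disk
 *		perror("IOC_PR_REGISTER");	// lands in sd_pr_register() above
 */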
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static const struct block_device_operations sd_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) .open = sd_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) .release = sd_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) .ioctl = sd_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) .getgeo = sd_getgeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) .compat_ioctl = sd_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) .check_events = sd_check_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) .unlock_native_capacity = sd_unlock_native_capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) .report_zones = sd_zbc_report_zones,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) .pr_ops = &sd_pr_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * sd_eh_reset - reset error handling callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * @scmd: sd-issued command that has failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * This function is called by the SCSI midlayer before starting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * SCSI EH. When counting medium access failures we have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * careful to register it only once per device and SCSI EH run;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * there might be several timed out commands which will cause the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * 'max_medium_access_timeouts' counter to trigger after the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * SCSI EH run already and set the device to offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * So this function resets the internal counter before starting SCSI EH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static void sd_eh_reset(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) /* New SCSI EH run, reset gate variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) sdkp->ignore_medium_access_errors = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * sd_eh_action - error handling callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * @scmd: sd-issued command that has failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * @eh_disp: The recovery disposition suggested by the midlayer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * This function is called by the SCSI midlayer upon completion of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * error test command (currently TEST UNIT READY). The result of sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * the eh command is passed in eh_disp. We're looking for devices that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * fail medium access commands but are OK with non-access commands like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * TEST UNIT READY (and would therefore wrongly be seen as having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * recovered successfully).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct scsi_device *sdev = scmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (!scsi_device_online(sdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) !scsi_medium_access_command(scmd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) host_byte(scmd->result) != DID_TIME_OUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) eh_disp != SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return eh_disp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * The device has timed out executing a medium access command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) * However, the TEST UNIT READY command sent during error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * handling completed successfully. Either the device is in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * process of recovering or it has suffered an internal failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) * that prevents access to the storage medium.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!sdkp->ignore_medium_access_errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) sdkp->medium_access_timed_out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) sdkp->ignore_medium_access_errors = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * If the device keeps failing read/write commands but TEST UNIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * READY always completes successfully we assume that medium
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * access is no longer possible and take the device offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) scmd_printk(KERN_ERR, scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) "Medium access timeout failure. Offlining disk!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) mutex_lock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) scsi_device_set_state(sdev, SDEV_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) mutex_unlock(&sdev->state_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) return eh_disp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) struct request *req = scmd->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct scsi_device *sdev = scmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) unsigned int transferred, good_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) u64 start_lba, end_lba, bad_lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * Some commands have a payload smaller than the device logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * block size (e.g. INQUIRY on a 4K disk).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (scsi_bufflen(scmd) <= sdev->sector_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /* Check if we have 'bad_lba' information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (!scsi_get_sense_info_fld(scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) SCSI_SENSE_BUFFERSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) &bad_lba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * If the bad lba was reported incorrectly, we have no idea where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * the error is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (bad_lba < start_lba || bad_lba >= end_lba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * resid is optional but mostly filled in. When it's unused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) * its value is zero, so we assume the whole buffer was transferred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) /* This computation should always be done in terms of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * resolution of the device's medium.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) return min(good_bytes, transferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
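
/*
 * Worked example of the arithmetic above (illustrative numbers): a READ of
 * 8 logical blocks starting at LBA 100 on a 4096-byte-sector disk fails
 * with a MEDIUM ERROR reporting bad_lba = 103. Then
 *
 *	start_lba = 100, end_lba = 108
 *	good_bytes = (103 - 100) * 4096 = 12288
 *
 * capped by the number of bytes actually transferred, so the three blocks
 * in front of the bad one are completed and the rest of the request fails.
 */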
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * sd_done - bottom half handler: called when the lower level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * driver has completed (successfully or otherwise) a scsi command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * @SCpnt: mid-level's per command structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * Note: potentially run from within an ISR. Must not block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static int sd_done(struct scsi_cmnd *SCpnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) int result = SCpnt->result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) unsigned int sector_size = SCpnt->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) unsigned int resid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct request *req = SCpnt->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) int sense_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) int sense_deferred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) switch (req_op(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) case REQ_OP_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) case REQ_OP_WRITE_ZEROES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) case REQ_OP_WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) case REQ_OP_ZONE_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) case REQ_OP_ZONE_RESET_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) case REQ_OP_ZONE_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) case REQ_OP_ZONE_CLOSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) case REQ_OP_ZONE_FINISH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (!result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) good_bytes = blk_rq_bytes(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) scsi_set_resid(SCpnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) good_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) scsi_set_resid(SCpnt, blk_rq_bytes(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * In case of bogus fw or device, we could end up having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * an unaligned partial completion. Check this here and force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) resid = scsi_get_resid(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (resid & (sector_size - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) sd_printk(KERN_INFO, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) resid, sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) scsi_print_command(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) resid = min(scsi_bufflen(SCpnt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) round_up(resid, sector_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) scsi_set_resid(SCpnt, resid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (sense_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) sense_deferred = scsi_sense_is_deferred(&sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) sdkp->medium_access_timed_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (driver_byte(result) != DRIVER_SENSE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) (!sense_valid || sense_deferred))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) switch (sshdr.sense_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) case HARDWARE_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) case MEDIUM_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) good_bytes = sd_completed_bytes(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) case RECOVERED_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) good_bytes = scsi_bufflen(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) case NO_SENSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /* This indicates a false check condition, so ignore the sense. An
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * unknown amount of data was transferred, so no bytes are credited as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * completed and the transfer is treated as an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) SCpnt->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) case ABORTED_COMMAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) good_bytes = sd_completed_bytes(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) case ILLEGAL_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) switch (sshdr.asc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) case 0x10: /* DIX: Host detected corruption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) good_bytes = sd_completed_bytes(SCpnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) case 0x20: /* INVALID COMMAND OPCODE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) case 0x24: /* INVALID FIELD IN CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) switch (SCpnt->cmnd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) case UNMAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) sd_config_discard(sdkp, SD_LBP_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) case WRITE_SAME_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) case WRITE_SAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (SCpnt->cmnd[1] & 8) { /* UNMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) sd_config_discard(sdkp, SD_LBP_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) sdkp->device->no_write_same = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) sd_config_write_same(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) req->rq_flags |= RQF_QUIET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (sd_is_zoned(sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) "sd_done: completed %d of %d bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) good_bytes, scsi_bufflen(SCpnt)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return good_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * spinup disk - called only in sd_revalidate_disk()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) sd_spinup_disk(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) unsigned char cmd[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) unsigned long spintime_expire = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) int retries, spintime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) unsigned int the_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) int sense_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) spintime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) /* Spin up drives, as required. This is needed at boot time and also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) * whenever the driver module is loaded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) cmd[0] = TEST_UNIT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) memset((void *) &cmd[1], 0, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) the_result = scsi_execute_req(sdkp->device, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) DMA_NONE, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) &sshdr, SD_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) sdkp->max_retries, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * If the drive has indicated to us that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * doesn't have any media in it, don't bother
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * with any more polling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (media_not_present(sdkp, &sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (the_result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) sense_valid = scsi_sense_valid(&sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) } while (retries < 3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) (!scsi_status_is_good(the_result) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) ((driver_byte(the_result) == DRIVER_SENSE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) sense_valid && sshdr.sense_key == UNIT_ATTENTION)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (driver_byte(the_result) != DRIVER_SENSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /* no sense, TUR either succeeded or failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) * with a status error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if(!spintime && !scsi_status_is_good(the_result)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) sd_print_result(sdkp, "Test Unit Ready failed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) the_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) * The device does not want the automatic start to be issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (sdkp->device->no_start_on_add)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (sense_valid && sshdr.sense_key == NOT_READY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (sshdr.asc == 4 && sshdr.ascq == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) break; /* manual intervention required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (sshdr.asc == 4 && sshdr.ascq == 0xb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) break; /* standby */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (sshdr.asc == 4 && sshdr.ascq == 0xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) break; /* unavailable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) break; /* sanitize in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * Issue command to spin up drive when not ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!spintime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) cmd[0] = START_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) cmd[1] = 1; /* Return immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) memset((void *) &cmd[2], 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) cmd[4] = 1; /* Start spin cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (sdkp->device->start_stop_pwr_cond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) cmd[4] |= 1 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) scsi_execute_req(sdkp->device, cmd, DMA_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) NULL, 0, &sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) SD_TIMEOUT, sdkp->max_retries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) spintime_expire = jiffies + 100 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) spintime = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* Wait 1 second for next try */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) printk(KERN_CONT ".");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * Wait for USB flash devices with slow firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * Yes, this sense key/ASC combination shouldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * occur here. It's characteristic of these devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) } else if (sense_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) sshdr.sense_key == UNIT_ATTENTION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) sshdr.asc == 0x28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) if (!spintime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) spintime_expire = jiffies + 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) spintime = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /* Wait 1 second for next try */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) /* we don't understand the sense code, so it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * probably pointless to loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) if(!spintime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) sd_print_sense_hdr(sdkp, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) } while (spintime && time_before_eq(jiffies, spintime_expire));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (spintime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (scsi_status_is_good(the_result))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) printk(KERN_CONT "ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) printk(KERN_CONT "not responding...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
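
/*
 * For reference, the START STOP UNIT CDB issued above when the drive
 * reports NOT READY (field meanings per SBC):
 *
 *	byte 0			0x1B (START_STOP)
 *	byte 1, bit 0		IMMED = 1, return status before spin-up finishes
 *	byte 4, bit 0		START = 1, spin the medium up
 *	byte 4, bits 7:4	POWER CONDITION = 0x1 (ACTIVE) when the device
 *				sets start_stop_pwr_cond
 *
 * The outer loop then polls with TEST UNIT READY about once a second until
 * the drive becomes ready or spintime_expire (roughly 100 seconds) passes.
 */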
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * Determine whether disk supports Data Integrity Field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) sdkp->protection_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (type > T10_PI_TYPE3_PROTECTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) else if (scsi_host_dif_capable(sdp->host, type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (sdkp->first_scan || type != sdkp->protection_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) case -ENODEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) " protection type %u. Disabling disk!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) "Enabling DIF Type %u protection\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) "Disabling DIF Type %u protection\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) sdkp->protection_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
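
/*
 * Example of the decoding above: READ CAPACITY(16) byte 12 = 0x03 means
 * PROT_EN = 1 (bit 0) and P_TYPE = 1 (bits 3:1), so
 *
 *	type = ((0x03 >> 1) & 7) + 1 = 2	(T10 DIF Type 2)
 *
 * With PROT_EN clear the protection type is recorded as 0; a decoded type
 * above T10_PI_TYPE3_PROTECTION is reported as unsupported via -ENODEV.
 */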
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct scsi_sense_hdr *sshdr, int sense_valid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) int the_result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (driver_byte(the_result) == DRIVER_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) sd_print_sense_hdr(sdkp, sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * Set dirty bit for removable devices if not ready -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * sometimes drives will not report this properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (sdp->removable &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) sense_valid && sshdr->sense_key == NOT_READY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) set_media_not_present(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) * We used to set media_present to 0 here to indicate no media
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) * in the drive, but some drives fail read capacity even with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) * media present, so we can't do that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) sdkp->capacity = 0; /* unknown mapped to zero - as usual */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) #define RC16_LEN 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) #if RC16_LEN > SD_BUF_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) #error RC16_LEN must not be more than SD_BUF_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) #define READ_CAPACITY_RETRIES_ON_RESET 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) unsigned char cmd[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) int sense_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) int the_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) unsigned int alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) unsigned long long lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) unsigned sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (sdp->no_read_capacity_16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) memset(cmd, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) cmd[0] = SERVICE_ACTION_IN_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) cmd[1] = SAI_READ_CAPACITY_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) cmd[13] = RC16_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) memset(buffer, 0, RC16_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) buffer, RC16_LEN, &sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) SD_TIMEOUT, sdkp->max_retries, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (media_not_present(sdkp, &sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) if (the_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) sense_valid = scsi_sense_valid(&sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (sense_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) sshdr.sense_key == ILLEGAL_REQUEST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) sshdr.ascq == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* Invalid Command Operation Code or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * Invalid Field in CDB, just retry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * silently with RC10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (sense_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) sshdr.sense_key == UNIT_ATTENTION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) sshdr.asc == 0x29 && sshdr.ascq == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) /* Device reset might occur several times,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * give it one more chance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (--reset_retries > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) retries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) } while (the_result && retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (the_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) sector_size = get_unaligned_be32(&buffer[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) lba = get_unaligned_be64(&buffer[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (sd_read_protection_type(sdkp, buffer) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /* Logical blocks per physical block exponent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) /* RC basis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) /* Lowest aligned logical block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) blk_queue_alignment_offset(sdp->request_queue, alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (alignment && sdkp->first_scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) "physical block alignment offset: %u\n", alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) if (buffer[14] & 0x80) { /* LBPME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) sdkp->lbpme = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (buffer[14] & 0x40) /* LBPRZ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) sdkp->lbprz = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) sd_config_discard(sdkp, SD_LBP_WS16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) sdkp->capacity = lba + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
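/*
* Legacy path using READ CAPACITY(10): only a 32-bit block count and the
* logical sector size are reported, with no protection or provisioning
* information. Returns the sector size on success or a negative errno.
*/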
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) unsigned char cmd[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) int sense_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) int the_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) sector_t lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) unsigned sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) cmd[0] = READ_CAPACITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) memset(&cmd[1], 0, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) memset(buffer, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) buffer, 8, &sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) SD_TIMEOUT, sdkp->max_retries, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (media_not_present(sdkp, &sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (the_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) sense_valid = scsi_sense_valid(&sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (sense_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) sshdr.sense_key == UNIT_ATTENTION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) sshdr.asc == 0x29 && sshdr.ascq == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) /* Device reset might occur several times,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * give it one more chance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (--reset_retries > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) retries--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) } while (the_result && retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (the_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) sector_size = get_unaligned_be32(&buffer[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) lba = get_unaligned_be32(&buffer[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) /* Some buggy (USB card reader) devices return an lba of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 0xffffffff when they want to report a size of 0 (by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) which they really mean no media is present) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) sdkp->physical_block_size = sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) return sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) sdkp->capacity = lba + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) sdkp->physical_block_size = sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
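/*
* Decide whether READ CAPACITY(16) should be tried before the 10-byte
* variant: the host must accept 16-byte CDBs and the device must not be
* flagged to try RC10 first; SPC-3 (or later) devices and devices with
* protection information get RC16 first.
*/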
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static int sd_try_rc16_first(struct scsi_device *sdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) if (sdp->host->max_cmd_len < 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (sdp->try_rc_10_first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (sdp->scsi_level > SCSI_SPC_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (scsi_device_protection(sdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * read disk capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) int sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (sd_try_rc16_first(sdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) sector_size = read_capacity_16(sdkp, sdp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (sector_size == -EOVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) goto got_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (sector_size == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) if (sector_size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) sector_size = read_capacity_10(sdkp, sdp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (sector_size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) sector_size = read_capacity_10(sdkp, sdp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (sector_size == -EOVERFLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) goto got_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (sector_size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if ((sizeof(sdkp->capacity) > 4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) (sdkp->capacity > 0xffffffffULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) int old_sector_size = sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) sd_printk(KERN_NOTICE, sdkp, "Very big device. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) "Trying to use READ CAPACITY(16).\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) sector_size = read_capacity_16(sdkp, sdp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (sector_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) "Using 0xffffffff as device size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) sdkp->capacity = 1 + (sector_t) 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) sector_size = old_sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) goto got_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) /* Remember that READ CAPACITY(16) succeeded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) sdp->try_rc_10_first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) /* Some devices are known to return the total number of blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * not the highest block number. Some devices have versions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * which do this and others which do not. Some devices we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) * suspect of doing this but we don't know for certain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * If we know the reported capacity is wrong, decrement it. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * we can only guess, then assume the number of blocks is even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) * (usually true but not always) and err on the side of lowering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) * the capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (sdp->fix_capacity ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) "from its reported value: %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) (unsigned long long) sdkp->capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) --sdkp->capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) got_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (sector_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) sector_size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) "assuming 512.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (sector_size != 512 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) sector_size != 1024 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) sector_size != 2048 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) sector_size != 4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * The user might want to re-format the drive with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) * a supported sector size. Once this happens, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) * would be relatively trivial to set the device up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * For this reason, we leave the device in the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * Set a bogus sector size so the normal read/write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) * logic in the block layer will eventually refuse any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) * request on this device without tripping over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) * power-of-two sector size assumptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) sector_size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) blk_queue_logical_block_size(sdp->request_queue, sector_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) blk_queue_physical_block_size(sdp->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) sdkp->physical_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) sdkp->device->sector_size = sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (sdkp->capacity > 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) sdp->use_16_for_rw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) * Print disk capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) sd_print_capacity(struct scsi_disk *sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) sector_t old_capacity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) int sector_size = sdkp->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) char cap_str_2[10], cap_str_10[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (!sdkp->first_scan && old_capacity == sdkp->capacity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) string_get_size(sdkp->capacity, sector_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) string_get_size(sdkp->capacity, sector_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) "%llu %d-byte logical blocks: (%s/%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) (unsigned long long)sdkp->capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) sector_size, cap_str_10, cap_str_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (sdkp->physical_block_size != sector_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) "%u-byte physical blocks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) sdkp->physical_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /* called with buffer of length 512 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) unsigned char *buffer, int len, struct scsi_mode_data *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) SD_TIMEOUT, sdkp->max_retries, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) * read write protect setting, if possible - called only in sd_revalidate_disk()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) * called with buffer of length SD_BUF_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) struct scsi_mode_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) int old_wp = sdkp->write_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) set_disk_ro(sdkp->disk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) if (sdp->skip_ms_page_3f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (sdp->use_192_bytes_for_3f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) * First attempt: ask for all pages (0x3F), but only 4 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * We have to start carefully: some devices hang if we ask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * for more than is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * Second attempt: ask for page 0. When only page 0 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * implemented, a request for page 3F may return Sense Key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * 5: Illegal Request, Sense Code 24: Invalid field in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * CDB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * Third attempt: ask for 255 bytes, as we did earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) &data, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (res < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) sd_first_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) "Test WP failed, assume Write Enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) sdkp->write_prot = ((data.device_specific & 0x80) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) set_disk_ro(sdkp->disk, sdkp->write_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (sdkp->first_scan || old_wp != sdkp->write_prot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) sdkp->write_prot ? "on" : "off");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) * sd_read_cache_type - called only from sd_revalidate_disk()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) * called with buffer of length SD_BUF_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) int len = 0, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) int dbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) int modepage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) int first_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) struct scsi_mode_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) int old_wce = sdkp->WCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) int old_rcd = sdkp->RCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) int old_dpofua = sdkp->DPOFUA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) if (sdkp->cache_override)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) first_len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (sdp->skip_ms_page_8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if (sdp->type == TYPE_RBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) goto defaults;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) if (sdp->skip_ms_page_3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) goto defaults;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) modepage = 0x3F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) if (sdp->use_192_bytes_for_3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) first_len = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) dbd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) } else if (sdp->type == TYPE_RBC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) modepage = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) dbd = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) modepage = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) dbd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) /* cautiously ask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) &data, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) goto bad_sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (!data.header_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) modepage = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) first_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) sd_first_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) "Missing header in MODE_SENSE response\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) /* that went OK, now ask for the proper length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) len = data.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) * We're only interested in the first three bytes, actually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) * But the data cache page is defined for the first 20.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (len < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) goto bad_sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) else if (len > SD_BUF_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) "data from %d to %d bytes\n", len, SD_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) len = SD_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) len = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) /* Get the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (len > first_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) &data, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) int offset = data.header_length + data.block_descriptor_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
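/*
* Walk the returned mode pages looking for the Caching page (8) or
* page 6 (RBC device parameters), skipping any other page by its
* advertised length.
*/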
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) while (offset < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) u8 page_code = buffer[offset] & 0x3F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) u8 spf = buffer[offset] & 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) if (page_code == 8 || page_code == 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) /* We're interested only in the first 3 bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (len - offset <= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) sd_first_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) "Incomplete mode parameter "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) "data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) goto defaults;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) modepage = page_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) goto Page_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /* Go to the next page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (spf && len - offset > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) offset += 4 + (buffer[offset+2] << 8) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) buffer[offset+3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) else if (!spf && len - offset > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) offset += 2 + buffer[offset+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) sd_first_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) "Incomplete mode "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) "parameter data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) goto defaults;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) goto defaults;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) Page_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (modepage == 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) sdkp->RCD = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (sdp->broken_fua) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) sdkp->DPOFUA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) !sdkp->device->use_16_for_rw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) sd_first_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) "Uses READ/WRITE(6), disabling FUA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) sdkp->DPOFUA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) /* No cache flush allowed for write protected devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (sdkp->WCE && sdkp->write_prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) sdkp->WCE = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) if (sdkp->first_scan || old_wce != sdkp->WCE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) "Write cache: %s, read cache: %s, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) sdkp->WCE ? "enabled" : "disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) sdkp->RCD ? "disabled" : "enabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) sdkp->DPOFUA ? "supports DPO and FUA"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) : "doesn't support DPO or FUA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) bad_sense:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (scsi_sense_valid(&sshdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) sshdr.sense_key == ILLEGAL_REQUEST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) sshdr.asc == 0x24 && sshdr.ascq == 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /* Invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) sd_first_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) "Asking for cache data failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) defaults:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) if (sdp->wce_default_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) sd_first_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) "Assuming drive cache: write back\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) sdkp->WCE = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) sd_first_printk(KERN_ERR, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) "Assuming drive cache: write through\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) sdkp->WCE = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) sdkp->RCD = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) sdkp->DPOFUA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) * The ATO bit indicates whether the DIF application tag is available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) * for use by the operating system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) int res, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) struct scsi_mode_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (sdkp->protection_type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) sdkp->max_retries, &data, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (res < 0 || !data.header_length ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) data.length < 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) sd_first_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) "getting Control mode page failed, assume no ATO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) if (scsi_sense_valid(&sshdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) sd_print_sense_hdr(sdkp, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) offset = data.header_length + data.block_descriptor_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) if ((buffer[offset] & 0x3f) != 0x0a) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if ((buffer[offset + 5] & 0x80) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) sdkp->ATO = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * sd_read_block_limits - Query disk device for preferred I/O sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) * @sdkp: disk to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) static void sd_read_block_limits(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) unsigned int sector_sz = sdkp->device->sector_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) const int vpd_len = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) if (!buffer ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) /* Block Limits VPD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) blk_queue_io_min(sdkp->disk->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) get_unaligned_be16(&buffer[6]) * sector_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
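/*
* A page length of 0x3c indicates the full Block Limits page, which
* also carries the WRITE SAME and UNMAP limits parsed below.
*/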
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) if (buffer[3] == 0x3c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) unsigned int lba_count, desc_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) if (!sdkp->lbpme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) lba_count = get_unaligned_be32(&buffer[20]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) desc_count = get_unaligned_be32(&buffer[24]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) if (lba_count && desc_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) sdkp->max_unmap_blocks = lba_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (buffer[32] & 0x80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) sdkp->unmap_alignment =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) get_unaligned_be32(&buffer[32]) & ~(1 << 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (sdkp->max_unmap_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) sd_config_discard(sdkp, SD_LBP_UNMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) sd_config_discard(sdkp, SD_LBP_WS16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) } else { /* LBP VPD page tells us what to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (sdkp->lbpu && sdkp->max_unmap_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) sd_config_discard(sdkp, SD_LBP_UNMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) else if (sdkp->lbpws)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) sd_config_discard(sdkp, SD_LBP_WS16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) else if (sdkp->lbpws10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) sd_config_discard(sdkp, SD_LBP_WS10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) sd_config_discard(sdkp, SD_LBP_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) * sd_read_block_characteristics - Query block dev. characteristics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) * @sdkp: disk to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static void sd_read_block_characteristics(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) struct request_queue *q = sdkp->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) unsigned char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) u16 rot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) const int vpd_len = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) buffer = kmalloc(vpd_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (!buffer ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) /* Block Device Characteristics VPD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) rot = get_unaligned_be16(&buffer[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) if (rot == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) if (sdkp->device->type == TYPE_ZBC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) /* Host-managed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) sdkp->zoned = (buffer[8] >> 4) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (sdkp->zoned == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) /* Host-aware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) /* Regular disk or drive managed disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) if (!sdkp->first_scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (blk_queue_is_zoned(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (sdkp->zoned == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) "Host-aware SMR disk used as regular disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) else if (sdkp->zoned == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) sd_printk(KERN_NOTICE, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) "Drive-managed SMR disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) * sd_read_block_provisioning - Query provisioning VPD page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) * @sdkp: disk to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) static void sd_read_block_provisioning(struct scsi_disk *sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) unsigned char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) const int vpd_len = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (sdkp->lbpme == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) buffer = kmalloc(vpd_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) sdkp->lbpvpd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) struct scsi_device *sdev = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (sdev->host->no_write_same) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) sdev->no_write_same = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
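/*
* Probe REPORT SUPPORTED OPERATION CODES itself by asking about INQUIRY,
* which every device must implement; if that fails the command is not
* supported at all.
*/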
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) /* too large values might cause issues with arcmsr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) int vpd_buf_len = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) sdev->no_report_opcodes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) /* Disable WRITE SAME if REPORT SUPPORTED OPERATION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) * CODES is unsupported and the device has an ATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * Information VPD page (SAT).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) sdev->no_write_same = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) sdkp->ws16 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) sdkp->ws10 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) struct scsi_device *sdev = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) if (!sdev->security_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) SECURITY_PROTOCOL_IN) == 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) SECURITY_PROTOCOL_OUT) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) sdkp->security = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) * Determine the device's preferred I/O size for reads and writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) * unless the reported value is unreasonably small, large, not a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) * multiple of the physical block size, or simply garbage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) unsigned int dev_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) unsigned int opt_xfer_bytes =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (sdkp->opt_xfer_blocks == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) if (sdkp->opt_xfer_blocks > dev_max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) sd_first_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) "Optimal transfer size %u logical blocks " \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) "> dev_max (%u logical blocks)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) sdkp->opt_xfer_blocks, dev_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) sd_first_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) "Optimal transfer size %u logical blocks " \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) "> sd driver limit (%u logical blocks)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (opt_xfer_bytes < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) sd_first_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) "Optimal transfer size %u bytes < " \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) "PAGE_SIZE (%u bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) opt_xfer_bytes, (unsigned int)PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)
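/*
 * physical_block_size is expected to be a power of two here, so
 * masking with (size - 1) is a cheap "is it a multiple?" check.
 */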
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) sd_first_printk(KERN_WARNING, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) "Optimal transfer size %u bytes not a " \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) "multiple of physical block size (%u bytes)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) opt_xfer_bytes, sdkp->physical_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) opt_xfer_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) * sd_revalidate_disk - called the first time a new disk is seen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) * performs disk spin up, read_capacity, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) * @disk: struct gendisk we care about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) static int sd_revalidate_disk(struct gendisk *disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) struct scsi_disk *sdkp = scsi_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) struct request_queue *q = sdkp->disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) sector_t old_capacity = sdkp->capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) unsigned char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) unsigned int dev_max, rw_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) "sd_revalidate_disk\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * If the device is offline, don't try to read capacity or any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) * of the other niceties.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) if (!scsi_device_online(sdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) "allocation failure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) sd_spinup_disk(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) * Without media there is no reason to ask; moreover, some devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) * react badly if we do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) if (sdkp->media_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) sd_read_capacity(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) * Set the default to rotational. All non-rotational devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) * support the block characteristics VPD page, which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) * cause this to be updated correctly; any device that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) * doesn't support it should be treated as rotational.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (scsi_device_supports_vpd(sdp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) sd_read_block_provisioning(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) sd_read_block_limits(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) sd_read_block_characteristics(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) sd_zbc_read_zones(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) sd_print_capacity(sdkp, old_capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) sd_read_write_protect_flag(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) sd_read_cache_type(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) sd_read_app_tag_own(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) sd_read_write_same(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) sd_read_security(sdkp, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) * We now have all cache-related info; determine how we deal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) * with flush requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) sd_set_flush_flag(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) /* Initial block count limit based on CDB TRANSFER LENGTH field size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) /* Some devices report a maximum block count for READ/WRITE requests. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) q->limits.io_opt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) (sector_t)BLK_DEF_MAX_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) /* Do not exceed controller limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) rw_max = min(rw_max, queue_max_hw_sectors(q));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) * Only update max_sectors if previously unset or if the current value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) * exceeds the capabilities of the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) if (sdkp->first_scan ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) q->limits.max_sectors > q->limits.max_dev_sectors ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) q->limits.max_sectors > q->limits.max_hw_sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) q->limits.max_sectors = rw_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) sdkp->first_scan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) set_capacity_revalidate_and_notify(disk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) logical_to_sectors(sdp, sdkp->capacity), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) sd_config_write_same(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) * For a zoned drive, revalidating the zones can be done only once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) * the gendisk capacity is set. So if this fails, set back the gendisk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) * capacity to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (sd_zbc_revalidate_zones(sdkp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) set_capacity_revalidate_and_notify(disk, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) * sd_unlock_native_capacity - unlock native capacity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) * @disk: struct gendisk to set capacity for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) * The block layer calls this function if it detects that partitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) * on @disk reach beyond the end of the device. If the SCSI host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) * implements the ->unlock_native_capacity() method, it is invoked to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) * give the host a chance to adjust the device capacity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) * Defined by block layer. Might sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) static void sd_unlock_native_capacity(struct gendisk *disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) struct scsi_device *sdev = scsi_disk(disk)->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (sdev->host->hostt->unlock_native_capacity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) sdev->host->hostt->unlock_native_capacity(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) * sd_format_disk_name - format disk name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) * @prefix: name prefix - ie. "sd" for SCSI disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) * @index: index of the disk to format name for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) * @buf: output buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) * @buflen: length of the output buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) * SCSI disk names start at sda. The 26th device is sdz and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) * 27th is sdaa. The last two-lettered name is sdzz, which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) * followed by sdaaa.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) * This is basically base-26 counting with one extra 'nil' entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) * at the beginning of every digit beyond the first, and the name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) * can be derived like a base-26 conversion with the index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) * decremented by one after each digit is computed.
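*
* For example, with the "sd" prefix: index 0 maps to "sda", index 25
* to "sdz", index 26 to "sdaa", index 701 to "sdzz" and index 702
* to "sdaaa".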
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) * Don't care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) const int base = 'z' - 'a' + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) char *begin = buf + strlen(prefix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) char *end = buf + buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) int unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
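/*
 * Build the suffix from its last character backwards; decrementing
 * the index after each digit makes this a bijective base-26 encoding
 * (digits 'a'..'z', with no zero digit).
 */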
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) p = end - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) *p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) unit = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) if (p == begin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) *--p = 'a' + (index % unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) index = (index / unit) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) } while (index >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) memmove(begin, p, end - p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) memcpy(buf, prefix, strlen(prefix));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) * sd_probe - called during driver initialization and whenever a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) * new scsi device is attached to the system. It is called once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) * for each scsi device (not just disks) present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) * @dev: pointer to device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) * Returns 0 if successful (or if not interested in this scsi device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) * e.g. a scanner); a negative errno when there is an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) * Note: this function is invoked from the scsi mid-level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) * This function sets up the mapping between a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) * <host,channel,id,lun> (found in sdp) and a new device name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) * (e.g. /dev/sda). More precisely, it is the block device major
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) * and minor number that is chosen here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) * Assume sd_probe is not re-entrant (for the time being).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) * Also think about sd_probe() and sd_remove() running concurrently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) static int sd_probe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) struct scsi_device *sdp = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) struct scsi_disk *sdkp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) struct gendisk *gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) scsi_autopm_get_device(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) if (sdp->type != TYPE_DISK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) sdp->type != TYPE_ZBC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) sdp->type != TYPE_MOD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) sdp->type != TYPE_RBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) #ifndef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (sdp->type == TYPE_ZBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) "sd_probe\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) if (!sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) gd = alloc_disk(SD_MINORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (!gd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) index = ida_alloc(&sd_index_ida, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) goto out_free_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) sdkp->device = sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) sdkp->driver = &sd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) sdkp->disk = gd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) sdkp->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) sdkp->max_retries = SD_MAX_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) atomic_set(&sdkp->openers, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) atomic_set(&sdkp->device->ioerr_cnt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) if (!sdp->request_queue->rq_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) if (sdp->type != TYPE_MOD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) blk_queue_rq_timeout(sdp->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) SD_MOD_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) device_initialize(&sdkp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) sdkp->dev.parent = get_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) sdkp->dev.class = &sd_disk_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) dev_set_name(&sdkp->dev, "%s", dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) error = device_add(&sdkp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) put_device(&sdkp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) dev_set_drvdata(dev, sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) device_init_wakeup(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
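/*
 * Roughly: index bits 4-7 pick one of the 16 sd majors and the low
 * nibble picks one of 16 disks within that major (SD_MINORS minors
 * each), covering the first 256 disks; higher index bits are folded
 * into first_minor and served from the extended devt range
 * (GENHD_FL_EXT_DEVT, set below).
 */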
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) gd->major = sd_major((index & 0xf0) >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) gd->fops = &sd_fops;
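/*
 * private_data points at the embedded driver pointer so that
 * scsi_disk() can map the gendisk back to its containing scsi_disk.
 */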
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) gd->private_data = &sdkp->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) gd->queue = sdkp->device->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) /* defaults, until the device tells us otherwise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) sdp->sector_size = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) sdkp->capacity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) sdkp->media_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) sdkp->write_prot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) sdkp->cache_override = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) sdkp->WCE = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) sdkp->RCD = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) sdkp->ATO = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) sdkp->first_scan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) sd_revalidate_disk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) gd->flags = GENHD_FL_EXT_DEVT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) if (sdp->removable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) gd->flags |= GENHD_FL_REMOVABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) gd->events |= DISK_EVENT_MEDIA_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) blk_pm_runtime_init(sdp->request_queue, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (sdp->rpm_autosuspend) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) pm_runtime_set_autosuspend_delay(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) sdp->host->hostt->rpm_autosuspend_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) device_add_disk(dev, gd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) if (sdkp->capacity)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) sd_dif_config_host(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) sd_revalidate_disk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) if (sdkp->security) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) if (sdkp->opal_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) sdp->removable ? "removable " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) scsi_autopm_put_device(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) out_free_index:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) ida_free(&sd_index_ida, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) put_disk(gd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) sd_zbc_release_disk(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) kfree(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) scsi_autopm_put_device(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) * sd_remove - called whenever a scsi disk (previously recognized by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) * sd_probe) is detached from the system. It is called (potentially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) * multiple times) during sd module unload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) * @dev: pointer to device object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) * Note: this function is invoked from the scsi mid-level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) * This function potentially frees up a device name (e.g. /dev/sdc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) * that could be re-used by a subsequent sd_probe().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) * This function is not called when the built-in sd driver is "exit-ed".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) static int sd_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) struct scsi_disk *sdkp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) dev_t devt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) sdkp = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) devt = disk_devt(sdkp->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) scsi_autopm_get_device(sdkp->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) async_synchronize_full_domain(&scsi_sd_pm_domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) device_del(&sdkp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) del_gendisk(sdkp->disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) sd_shutdown(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) free_opal_dev(sdkp->opal_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) blk_register_region(devt, SD_MINORS, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) sd_default_probe, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) mutex_lock(&sd_ref_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) dev_set_drvdata(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) put_device(&sdkp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) mutex_unlock(&sd_ref_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) * scsi_disk_release - Called to free the scsi_disk structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) * @dev: pointer to embedded class device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) * sd_ref_mutex must be held entering this routine. Because it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) * called on last put, you should always use the scsi_disk_get() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) * scsi_disk_put() helpers, which take sd_ref_mutex themselves, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) * never do a direct put_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) static void scsi_disk_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) struct scsi_disk *sdkp = to_scsi_disk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) struct gendisk *disk = sdkp->disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) struct request_queue *q = disk->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) ida_free(&sd_index_ida, sdkp->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) * Wait until all requests that are in progress have completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) * This is necessary to prevent e.g. scsi_end_request() from crashing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) * once the disk->private_data pointer is cleared below. Waiting here,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) * inside scsi_disk_release() instead of in sd_release(), ensures that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) * freezing and unfreezing the request queue does not affect user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) * I/O when multiple processes open a /dev/sd... node concurrently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) blk_mq_freeze_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) blk_mq_unfreeze_queue(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) disk->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) put_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) put_device(&sdkp->device->sdev_gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) sd_zbc_release_disk(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) kfree(sdkp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) unsigned char cmd[6] = { START_STOP }; /* START_VALID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) struct scsi_device *sdp = sdkp->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
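/*
 * Build a START STOP UNIT CDB: byte 4 bit 0 is the START bit and,
 * for devices using power conditions, bits 4-7 hold the POWER
 * CONDITION field (1 = ACTIVE, 3 = STANDBY).
 */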
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) if (start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) cmd[4] |= 1; /* START */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) if (sdp->start_stop_pwr_cond)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) cmd[4] |= start ? 1 << 4 : 3 << 4; /* Active or Standby */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) if (!scsi_device_online(sdp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) sd_print_result(sdkp, "Start/Stop Unit failed", res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (driver_byte(res) == DRIVER_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) sd_print_sense_hdr(sdkp, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) if (scsi_sense_valid(&sshdr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) /* 0x3a is medium not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) sshdr.asc == 0x3a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) /* SCSI error codes must not go to the generic layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) * Send a SYNCHRONIZE CACHE instruction down to the device through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) * the normal SCSI command structure. Wait for the command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) * complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) static void sd_shutdown(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) struct scsi_disk *sdkp = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) if (!sdkp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) return; /* this can happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) if (pm_runtime_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) if (sdkp->WCE && sdkp->media_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) sd_sync_cache(sdkp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) sd_start_stop_device(sdkp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) struct scsi_disk *sdkp = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) struct scsi_sense_hdr sshdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if (sdkp->WCE && sdkp->media_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) ret = sd_sync_cache(sdkp, &sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) /* ignore OFFLINE device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) if (ret == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (!scsi_sense_valid(&sshdr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) sshdr.sense_key != ILLEGAL_REQUEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) * sshdr.sense_key == ILLEGAL_REQUEST means this drive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) * doesn't support sync. There's not much to do and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) * suspend shouldn't fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (sdkp->device->manage_start_stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) /* an error is not worth aborting a system sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) ret = sd_start_stop_device(sdkp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) if (ignore_stop_errors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) static int sd_suspend_system(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) return sd_suspend_common(dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) static int sd_suspend_runtime(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) return sd_suspend_common(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) static int sd_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) struct scsi_disk *sdkp = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) if (!sdkp->device->manage_start_stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) /* Hold a wakeup source so the PM core cannot re-enter sleep while the disk is being started */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) pm_stay_awake(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) ret = sd_start_stop_device(sdkp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) opal_unlock_from_suspend(sdkp->opal_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) pm_relax(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) * init_sd - entry point for this driver (both when built in or when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) * a module).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) * Note: this function registers this driver with the scsi mid-level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) static int __init init_sd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) int majors = 0, i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) for (i = 0; i < SD_MAJORS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) if (register_blkdev(sd_major(i), "sd") != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) majors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) blk_register_region(sd_major(i), SD_MINORS, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) sd_default_probe, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) if (!majors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) err = class_register(&sd_disk_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) if (!sd_cdb_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) printk(KERN_ERR "sd: can't init extended cdb cache\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) goto err_out_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) if (!sd_cdb_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) printk(KERN_ERR "sd: can't init extended cdb pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) goto err_out_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) if (!sd_page_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) printk(KERN_ERR "sd: can't init discard page pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) goto err_out_ppool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) err = scsi_register_driver(&sd_template.gendrv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) goto err_out_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) err_out_driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) mempool_destroy(sd_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) err_out_ppool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) mempool_destroy(sd_cdb_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) err_out_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) kmem_cache_destroy(sd_cdb_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) err_out_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) class_unregister(&sd_disk_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) for (i = 0; i < SD_MAJORS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) unregister_blkdev(sd_major(i), "sd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) * exit_sd - exit point for this driver (when it is a module).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) * Note: this function unregisters this driver from the scsi mid-level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) static void __exit exit_sd(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) scsi_unregister_driver(&sd_template.gendrv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) mempool_destroy(sd_cdb_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) mempool_destroy(sd_page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) kmem_cache_destroy(sd_cdb_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) class_unregister(&sd_disk_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) for (i = 0; i < SD_MAJORS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) blk_unregister_region(sd_major(i), SD_MINORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) unregister_blkdev(sd_major(i), "sd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) module_init(init_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) module_exit(exit_sd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) scsi_print_sense_hdr(sdkp->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) const char *hb_string = scsi_hostbyte_string(result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) const char *db_string = scsi_driverbyte_string(result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) if (hb_string || db_string)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) sd_printk(KERN_INFO, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) hb_string ? hb_string : "invalid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) db_string ? db_string : "invalid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) sd_printk(KERN_INFO, sdkp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) msg, host_byte(result), driver_byte(result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }