^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef _SCSI_SCSI_HOST_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define _SCSI_SCSI_HOST_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/seq_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/blk-mq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/android_kabi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) struct block_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) struct completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) struct module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) struct scsi_cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) struct scsi_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) struct scsi_host_cmd_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) struct scsi_target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) struct Scsi_Host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) struct scsi_host_cmd_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) struct scsi_transport_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define SG_ALL SG_CHUNK_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define MODE_UNKNOWN 0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define MODE_INITIATOR 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define MODE_TARGET 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) struct scsi_host_template {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) struct module *module;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * The info function will return whatever useful information the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * developer sees fit. If not provided, then the name field will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * be used instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) const char *(* info)(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * Ioctl interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) void __user *arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * Compat handler. Handle 32bit ABI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * When unknown ioctl is passed return -ENOIOCTLCMD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) void __user *arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * The queuecommand function is used to queue up a scsi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * command block to the LLDD. When the driver finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * processing the command the done callback is invoked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * If queuecommand returns 0, then the driver has accepted the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * command. It must also push it to the HBA if the scsi_cmnd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * flag SCMD_LAST is set, or if the driver does not implement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * commit_rqs. The done() function must be called on the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * when the driver has finished with it. (you may call done on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * command before queuecommand returns, but in this case you
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * *must* return 0 from queuecommand).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * Queuecommand may also reject the command, in which case it may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) * not touch the command and must not call done() for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * There are two possible rejection returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * allow commands to other devices serviced by this host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * host temporarily.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * For compatibility, any other non-zero return is treated the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) * same as SCSI_MLQUEUE_HOST_BUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * NOTE: "temporarily" means either until the next command for#
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * this device/host completes, or a period of time determined by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * I/O pressure in the system if there are no other outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * STATUS: REQUIRED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * The commit_rqs function is used to trigger a hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * doorbell after some requests have been queued with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * queuecommand, when an error is encountered before sending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * the request with SCMD_LAST set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * STATUS: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void (*commit_rqs)(struct Scsi_Host *, u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * This is an error handling strategy routine. You don't need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * define one of these if you don't want to - there is a default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * routine that is present that should work in most cases. For those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) * driver authors that have the inclination and ability to write their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * own strategy routine, this is where it is specified. Note - the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * strategy routine is *ALWAYS* run in the context of the kernel eh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * thread. Thus you are guaranteed to *NOT* be in an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * handler when you execute this, and you are also guaranteed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) * *NOT* have any other commands being queued while you are in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) * strategy routine. When you return from this function, operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * return to normal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * See scsi_error.c scsi_unjam_host for additional comments about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * what this function should and should not be attempting to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * Status: REQUIRED (at least one of them)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) int (* eh_abort_handler)(struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) int (* eh_device_reset_handler)(struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) int (* eh_target_reset_handler)(struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) int (* eh_bus_reset_handler)(struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) int (* eh_host_reset_handler)(struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * Before the mid layer attempts to scan for a new device where none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * currently exists, it will call this entry in your driver. Should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * your driver need to allocate any structs or perform any other init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * items in order to send commands to a currently unused target/lun
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * combo, then this is where you can perform those allocations. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * is specifically so that drivers won't have to perform any kind of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * "is this a new device" checks in their queuecommand routine,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * thereby making the hot path a bit quicker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * Return values: 0 on success, non-0 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * Deallocation: If we didn't find any devices at this ID, you will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * get an immediate call to slave_destroy(). If we find something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * here then you will get a call to slave_configure(), then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * device will be used for however long it is kept around, then when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * the device is removed from the system (or * possibly at reboot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * time), you will then get a call to slave_destroy(). This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * assuming you implement slave_configure and slave_destroy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * However, if you allocate memory and hang it off the device struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * then you must implement the slave_destroy() routine at a minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * in order to avoid leaking memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * each time a device is tore down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) int (* slave_alloc)(struct scsi_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * Once the device has responded to an INQUIRY and we know the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * device is online, we call into the low level driver with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * struct scsi_device *. If the low level device driver implements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * this function, it *must* perform the task of setting the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * depth on the device. All other tasks are optional and depend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * on what the driver supports and various implementation details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * Things currently recommended to be handled at this time include:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * 1. Setting the device queue depth. Proper setting of this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * described in the comments for scsi_change_queue_depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * 2. Determining if the device supports the various synchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * negotiation protocols. The device struct will already have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * responded to INQUIRY and the results of the standard items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * will have been shoved into the various device flag bits, eg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) * device->sdtr will be true if the device supports SDTR messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * 3. Allocating command structs that the device will need.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) * 4. Setting the default timeout on this device (if needed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * 5. Anything else the low level driver might want to do on a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * specific setup basis...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * 6. Return 0 on success, non-0 on error. The device will be marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) * as offline on error so that no access will occur. If you return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * non-0, your slave_destroy routine will never get called for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * device, so don't leave any loose memory hanging around, clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) * up after yourself before returning non-0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) int (* slave_configure)(struct scsi_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * Immediately prior to deallocating the device and after all activity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * has ceased the mid layer calls this point so that the low level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * driver may completely detach itself from the scsi device and vice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * versa. The low level driver is responsible for freeing any memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * it allocated in the slave_alloc or slave_configure calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) void (* slave_destroy)(struct scsi_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * Before the mid layer attempts to scan for a new device attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * to a target where no target currently exists, it will call this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * entry in your driver. Should your driver need to allocate any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * structs or perform any other init items in order to send commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * to a currently unused target, then this is where you can perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * those allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * Return values: 0 on success, non-0 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) int (* target_alloc)(struct scsi_target *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * Immediately prior to deallocating the target structure, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * after all activity to attached scsi devices has ceased, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * midlayer calls this point so that the driver may deallocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * and terminate any references to the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) void (* target_destroy)(struct scsi_target *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) * If a host has the ability to discover targets on its own instead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * of scanning the entire bus, it can fill in this function and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * call scsi_scan_host(). This function will be called periodically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * until it returns 1 with the scsi_host and the elapsed time of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * the scan in jiffies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) int (* scan_finished)(struct Scsi_Host *, unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * If the host wants to be called before the scan starts, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * after the midlayer has set up ready for the scan, it can fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * in this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) void (* scan_start)(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * Fill in this function to allow the queue depth of this host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * to be changeable (on a per device basis). Returns either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * the current queue depth setting (may be different from what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * was passed in) or an error. An error should only be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * returned if the requested depth is legal but the driver was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * unable to set it. If the requested depth is illegal, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) * driver should set and return the closest legal queue depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) int (* change_queue_depth)(struct scsi_device *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * This functions lets the driver expose the queue mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * to the block layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) int (* map_queues)(struct Scsi_Host *shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * Check if scatterlists need to be padded for DMA draining.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) bool (* dma_need_drain)(struct request *rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * This function determines the BIOS parameters for a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) * harddisk. These tend to be numbers that are made up by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * the host adapter. Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) * size, device, list (heads, sectors, cylinders)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) int (* bios_param)(struct scsi_device *, struct block_device *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) sector_t, int []);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * This function is called when one or more partitions on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * device reach beyond the end of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) void (*unlock_native_capacity)(struct scsi_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * Can be used to export driver statistics and other infos to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * world outside the kernel ie. userspace and it also provides an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * interface to feed the driver with information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * Status: OBSOLETE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) int (*show_info)(struct seq_file *, struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) int (*write_info)(struct Scsi_Host *, char *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * This is an optional routine that allows the transport to become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * involved when a scsi io timer fires. The return value tells the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * timer routine how to finish the io timeout handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) /* This is an optional routine that allows transport to initiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) * LLD adapter or firmware reset using sysfs attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) * Return values: 0 on success, -ve value on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) * Status: OPTIONAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) int (*host_reset)(struct Scsi_Host *shost, int reset_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) #define SCSI_ADAPTER_RESET 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) #define SCSI_FIRMWARE_RESET 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * Name of proc directory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) const char *proc_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * Used to store the procfs directory if a driver implements the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) * show_info method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) struct proc_dir_entry *proc_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * This determines if we will use a non-interrupt driven
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * or an interrupt driven scheme. It is set to the maximum number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * of simultaneous commands a single hw queue in HBA will accept.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) int can_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * In many instances, especially where disconnect / reconnect are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * supported, our host also has an ID on the SCSI bus. If this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * the case, then it must be reserved. Please set this_id to -1 if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * your setup is in single initiator mode, and the host lacks an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) int this_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) * This determines the degree to which the host adapter is capable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * of scatter-gather.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) unsigned short sg_tablesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) unsigned short sg_prot_tablesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * Set this if the host adapter has limitations beside segment count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) unsigned int max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * Maximum size in bytes of a single segment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) unsigned int max_segment_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * DMA scatter gather segment boundary limit. A segment crossing this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * boundary will be split in two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) unsigned long dma_boundary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) unsigned long virt_boundary_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * This specifies "machine infinity" for host templates which don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * limit the transfer size. Note this limit represents an absolute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * maximum, and may be over the transfer limits allowed for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * individual devices (e.g. 256 for SCSI-1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) #define SCSI_DEFAULT_MAX_SECTORS 1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * True if this host adapter can make good use of linked commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * This will allow more than one command to be queued to a given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * unit on a given host. Set this to the maximum number of command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * blocks to be provided for each device. Set this to 1 for one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * command block per lun, 2 for two, etc. Do not set this to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * You should make sure that the host adapter will do the right thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * before you try setting this above 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) short cmd_per_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * present contains counter indicating how many boards of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * type were found when we did the scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) unsigned char present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /* If use block layer to manage tags, this is tag allocation policy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) int tag_alloc_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * Track QUEUE_FULL events and reduce queue depth on demand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) unsigned track_queue_depth:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) * This specifies the mode that a LLD supports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) unsigned supported_mode:2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) * True if this host adapter uses unchecked DMA onto an ISA bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) unsigned unchecked_isa_dma:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * True for emulated SCSI host adapters (e.g. ATAPI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) unsigned emulated:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * True if the low-level driver performs its own reset-settle delays.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) unsigned skip_settle_delay:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /* True if the controller does not support WRITE SAME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) unsigned no_write_same:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* True if the host uses host-wide tagspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) unsigned host_tagset:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) * Countdown for host blocking with no commands outstanding.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) unsigned int max_host_blocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
	/*
	 * Default value for the blocking. If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations when zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) #define SCSI_DEFAULT_HOST_BLOCKED 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * Pointer to the sysfs class properties for this host, NULL terminated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) struct device_attribute **shost_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * Pointer to the SCSI device properties for this host, NULL terminated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct device_attribute **sdev_attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) * Pointer to the SCSI device attribute groups for this host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) * NULL terminated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) const struct attribute_group **sdev_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) * Vendor Identifier associated with the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * Note: When specifying vendor_id, be sure to read the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * Vendor Type and ID formatting requirements specified in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * scsi_netlink.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) u64 vendor_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * Additional per-command data allocated for the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) unsigned int cmd_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) struct scsi_host_cmd_pool *cmd_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) /* Delay for runtime autosuspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) int rpm_autosuspend_delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) ANDROID_KABI_RESERVE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) ANDROID_KABI_RESERVE(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) ANDROID_KABI_RESERVE(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) ANDROID_KABI_RESERVE(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 * Expands to a queuecommand entry point named @func_name that takes
 * shost->host_lock with interrupts disabled, calls the driver's
 * @func_name##_lck() implementation, and releases the lock again --
 * preserving the historical "queuecommand runs under host_lock"
 * contract for drivers that have not been converted to lock-free
 * queueing.
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \
	{ \
		unsigned long irq_flags; \
		int rc; \
		spin_lock_irqsave(shost->host_lock, irq_flags); \
		rc = func_name##_lck (cmd, cmd->scsi_done); \
		spin_unlock_irqrestore(shost->host_lock, irq_flags); \
		return rc; \
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	/*
	 * The three *_RECOVERY states below all count as "in recovery";
	 * see scsi_host_in_recovery().
	 */
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
/*
 * A SCSI host adapter instance. Field order is KABI-sensitive (note the
 * ANDROID_KABI_RESERVE slot below) -- do not reorder members.
 */
struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head __devices;
	struct list_head __targets;

	/*
	 * NOTE(review): presumably devices that could not be issued
	 * commands for lack of resources and are waiting to be rerun;
	 * confirm against scsi_lib.c.
	 */
	struct list_head starved_list;

	spinlock_t default_lock;	/* NOTE(review): presumably what host_lock
					   points at unless a driver supplies its
					   own lock -- confirm */
	spinlock_t *host_lock;

	struct mutex scan_mutex;/* serialize scanning activity */

	struct list_head eh_cmd_q;	/* NOTE(review): presumably commands queued
					   for the error handler -- confirm */
	struct task_struct * ehandler; /* Error recovery thread. */
	struct completion * eh_action; /* Wait for specific actions on the
					  host. */
	wait_queue_head_t host_wait;
	struct scsi_host_template *hostt;	/* template this host was created from */
	struct scsi_transport_template *transportt;

	/* Area to keep a shared tag map */
	struct blk_mq_tag_set tag_set;

	atomic_t host_blocked;	/* counts down from max_host_blocked (see below) */

	unsigned int host_failed; /* commands that failed.
				     protected by host_lock */
	unsigned int host_eh_scheduled; /* EH scheduled without command */

	unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses.
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely. For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set. It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others.
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;	/* NOTE(review): presumably the adapter's own SCSI id
			   on the bus -- confirm against the template field */
	int can_queue;	/* max outstanding commands (see nr_hw_queues note below) */
	short cmd_per_lun;	/* command blocks per LUN; cf. the identically
				   named scsi_host_template field */
	short unsigned int sg_tablesize;	/* NOTE(review): presumably max
						   scatter/gather segments -- confirm */
	short unsigned int sg_prot_tablesize;	/* NOTE(review): as above, for
						   protection information -- confirm */
	unsigned int max_sectors;	/* request size cap; cf. SCSI_DEFAULT_MAX_SECTORS */
	unsigned int max_segment_size;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue. However, for when host_tagset is set,
	 * the total queue depth is can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned active_mode:2;		/* NOTE(review): presumably MODE_* bits,
					   cf. the template's supported_mode */
	unsigned unchecked_isa_dma:1;	/* uses unchecked DMA onto an ISA bus
					   (cf. the template flag) */

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[20];
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;	/* mask of enum scsi_host_prot_capabilities;
					   see scsi_host_set_prot() */
	unsigned char prot_guard_type;	/* NOTE(review): presumably an
					   enum scsi_host_guard_type value -- confirm */

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int irq;


	enum scsi_host_state shost_state;	/* see scsi_host_set_state() */

	/* ldm bits */
	struct device shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	ANDROID_KABI_RESERVE(1);

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[] /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
/* Map a shost_dev class device back to its containing Scsi_Host. */
#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
/* dev_printk() against the host's shost_gendev; @prefix is a KERN_* level. */
#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static inline void *shost_priv(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return (void *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) int scsi_is_host_device(const struct device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static inline struct Scsi_Host *dev_to_shost(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) while (!scsi_is_host_device(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (!dev->parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) dev = dev->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) return container_of(dev, struct Scsi_Host, shost_gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return shost->shost_state == SHOST_RECOVERY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) shost->shost_state == SHOST_CANCEL_RECOVERY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) shost->shost_state == SHOST_DEL_RECOVERY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) shost->tmf_in_progress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) extern void scsi_flush_work(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct device *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) extern void scsi_scan_host(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) extern void scsi_rescan_device(struct device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) extern void scsi_remove_host(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) extern int scsi_host_busy(struct Scsi_Host *shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) extern void scsi_host_put(struct Scsi_Host *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) extern struct Scsi_Host *scsi_host_lookup(unsigned short);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) extern const char *scsi_host_state_name(enum scsi_host_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
/*
 * scsi_add_host - add a host to the SCSI midlayer
 * @host: host to add
 * @dev:  parent device, also used as the DMA device
 *
 * Thin wrapper around scsi_add_host_with_dma() for the common case
 * where the parent device and the DMA device are the same.
 */
static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
/* Return the device the host was parented under (shost_gendev's parent). */
static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * scsi_host_scan_allowed - Is scanning of this host allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * @shost: Pointer to Scsi_Host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return shost->shost_state == SHOST_RUNNING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) shost->shost_state == SHOST_RECOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) extern void scsi_unblock_requests(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) extern void scsi_block_requests(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) extern int scsi_host_block(struct Scsi_Host *shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) void scsi_host_busy_iter(struct Scsi_Host *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) struct class_container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * These two functions are used to allocate and free a pseudo device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * which will connect to the host adapter itself rather than any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * physical device. You must deallocate when you are done with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * thing. This physical pseudo-device isn't real and won't be available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * from any high-level drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) extern void scsi_free_host_dev(struct scsi_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 *
 * The DIF bits occupy positions 0-2 and the DIX bits positions 3-6;
 * scsi_host_prot_dma() relies on this split.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 *
 * @mask is a bitwise OR of enum scsi_host_prot_capabilities values;
 * it replaces (does not OR into) any previously set capabilities.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
/* Read back the capability mask set via scsi_host_set_prot(). */
static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
/*
 * True if the host has any DIX capability: all SHOST_DIF_* bits are
 * below SHOST_DIX_TYPE0_PROTECTION (1 << 3), so the mask compares
 * >= to it exactly when at least one DIX bit is set.
 */
static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static unsigned char cap[] = { 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) SHOST_DIF_TYPE1_PROTECTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) SHOST_DIF_TYPE2_PROTECTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) SHOST_DIF_TYPE3_PROTECTION };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (target_type >= ARRAY_SIZE(cap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return shost->prot_capabilities & cap[target_type] ? target_type : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
/*
 * scsi_host_dix_capable - check host DIX support for a protection type
 * @shost: host to check
 * @target_type: DIX type (0-3) to test for
 *
 * Returns the matching SHOST_DIX_TYPE*_PROTECTION bit when the host
 * advertises it, otherwise 0. Always 0 when the kernel is built
 * without CONFIG_BLK_DEV_INTEGRITY.
 *
 * Restructured with #if/#else: the previous form placed an
 * unconditional "return 0;" after the #endif, which was unreachable
 * dead code whenever CONFIG_BLK_DEV_INTEGRITY was enabled. Behavior
 * is unchanged in both configurations.
 */
static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#else
	return 0;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has a much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
/* Guard tag checksum formats a DIX-capable host can advertise. */
enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,	/* T10-mandated CRC (always required) */
	SHOST_DIX_GUARD_IP = 1 << 1,	/* optional IP checksum (cheaper) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) shost->prot_guard_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return shost->prot_guard_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) #endif /* _SCSI_SCSI_HOST_H */