// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

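	/*
	 * Seed the chunk allocators that hand out pieces of the pages
	 * allocated above for ccw requests, error recovery requests and
	 * ESE handling.
	 */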
	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/*
	 * open_count = 0 means device online but not in use;
	 * it starts out at -1 while the device is not yet online.
	 */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

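	/*
	 * debugfs is best-effort: if the directory cannot be created we
	 * return NULL and the callers simply run without debugfs entries.
	 */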
	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Set up the gendisk, debugfs entries and the debug area for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Tear down the debugfs entries, debug area and gendisk.
 * Terminate any running i/o and flush the device queue.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

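	/*
	 * Switch to DASD_STATE_BASIC first so that no new requests are
	 * started while the block queue is flushed; revert on failure.
	 */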
	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
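		/*
		 * Send a change uevent for the disk and each partition so
		 * that user space picks up the now-valid partition
		 * information.
		 */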
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
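	/*
	 * -EAGAIN means the transition is still in progress; the discipline
	 * will trigger another dasd_change_state call when it completes
	 * (see the comment above dasd_state_basic_to_ready).
	 */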
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device via the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device via the
 * kernel event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device via the
 * kernel event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);

	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
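		/* Wake up waiters right away if no transition is needed. */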
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait for it to come up.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/*
	 * Count the length of the chanq for statistics; everything longer
	 * than 31 requests ends up in the last histogram bucket.
	 */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

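/*
 * Find the histogram bucket for a value: the smallest index (capped
 * at 31) for which value >> (2 + index) is zero, i.e. bucket 0 covers
 * values below 4 and each further bucket doubles the range.
 */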
#define dasd_profile_counter(value, index) \
{ \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		; \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (!cqr->buildclk || !cqr->startclk ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) !cqr->stopclk || !cqr->endclk ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) !sectors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
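/*
 * All TOD clock deltas are shifted right by 12 bits: bit 51 of the TOD
 * clock increments once per microsecond, so the shifted values below are
 * elapsed times in microseconds.
 */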
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) strtime = ((cqr->startclk - cqr->buildclk) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) endtime = ((cqr->endclk - cqr->stopclk) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) tottime = ((cqr->endclk - cqr->buildclk) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) tottimeps = tottime / sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) dasd_profile_counter(sectors, sectors_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) dasd_profile_counter(tottime, tottime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) dasd_profile_counter(tottimeps, tottimeps_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dasd_profile_counter(strtime, strtime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dasd_profile_counter(irqtime, irqtime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dasd_profile_counter(endtime, endtime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) spin_lock(&dasd_global_profile.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (dasd_global_profile.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) data = dasd_global_profile.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) data->dasd_sum_times += tottime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) data->dasd_sum_time_str += strtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) data->dasd_sum_time_irq += irqtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) data->dasd_sum_time_end += endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dasd_profile_end_add_data(dasd_global_profile.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) cqr->startdev != block->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cqr->cpmode == 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) rq_data_dir(req) == READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) sectors, sectors_ind, tottime_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) tottimeps_ind, strtime_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) irqtime_ind, irqtimeps_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) endtime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) spin_unlock(&dasd_global_profile.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) spin_lock(&block->profile.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (block->profile.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) data = block->profile.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) data->dasd_sum_times += tottime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) data->dasd_sum_time_str += strtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) data->dasd_sum_time_irq += irqtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) data->dasd_sum_time_end += endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) dasd_profile_end_add_data(block->profile.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) cqr->startdev != block->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) cqr->cpmode == 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) rq_data_dir(req) == READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) sectors, sectors_ind, tottime_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) tottimeps_ind, strtime_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) irqtime_ind, irqtimeps_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) endtime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) spin_unlock(&block->profile.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) spin_lock(&device->profile.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (device->profile.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) data = device->profile.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) data->dasd_sum_times += tottime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) data->dasd_sum_time_str += strtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) data->dasd_sum_time_irq += irqtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) data->dasd_sum_time_end += endtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dasd_profile_end_add_data(device->profile.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) cqr->startdev != block->base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) cqr->cpmode == 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) rq_data_dir(req) == READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sectors, sectors_ind, tottime_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) tottimeps_ind, strtime_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) irqtime_ind, irqtimeps_ind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) endtime_ind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) spin_unlock(&device->profile.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
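/*
 * Note on locking: the process-context accessors below take profile->lock
 * with the _bh variants because the profile data is also updated from
 * softirq (tasklet) context by the dasd_profile_start/end paths above.
 */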
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) void dasd_profile_reset(struct dasd_profile *profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct dasd_profile_info *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) spin_lock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) data = profile->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) memset(data, 0, sizeof(*data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ktime_get_real_ts64(&data->starttod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int dasd_profile_on(struct dasd_profile *profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct dasd_profile_info *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) data = kzalloc(sizeof(*data), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) spin_lock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (profile->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ktime_get_real_ts64(&data->starttod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) profile->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) void dasd_profile_off(struct dasd_profile *profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) spin_lock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) kfree(profile->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) profile->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
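/*
 * Copy a string of length user_len from user space into a freshly
 * allocated, NUL-terminated kernel buffer and strip a trailing newline.
 * Expects user_len > 0; returns an ERR_PTR on failure. The caller must
 * release the buffer with vfree().
 */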
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) char *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) buffer = vmalloc(user_len + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (buffer == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (copy_from_user(buffer, user_buf, user_len) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) vfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* got the string, now strip the trailing newline. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (buffer[user_len - 1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) buffer[user_len - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) buffer[user_len] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
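/*
 * Handle writes to a debugfs "statistics" file. Three commands are
 * accepted: "reset" clears the counters, "on" (re)enables data collection
 * and "off" disables it. For example, assuming debugfs is mounted at the
 * usual /sys/kernel/debug:
 *
 *   echo on    > /sys/kernel/debug/dasd/global/statistics
 *   echo reset > /sys/kernel/debug/dasd/global/statistics
 *   echo off   > /sys/kernel/debug/dasd/global/statistics
 */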
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) static ssize_t dasd_stats_write(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) const char __user *user_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) size_t user_len, loff_t *pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) char *buffer, *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct seq_file *m = (struct seq_file *)file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct dasd_profile *prof = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (user_len > 65536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) user_len = 65536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) buffer = dasd_get_user_string(user_buf, user_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (IS_ERR(buffer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return PTR_ERR(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) str = skip_spaces(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rc = user_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (strncmp(str, "reset", 5) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) dasd_profile_reset(prof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) } else if (strncmp(str, "on", 2) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) rc = dasd_profile_on(prof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) rc = user_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (prof == &dasd_global_profile) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dasd_profile_reset(prof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) } else if (strncmp(str, "off", 3) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (prof == &dasd_global_profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) dasd_global_profile_level = DASD_PROFILE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) dasd_profile_off(prof);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) vfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
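/*
 * Print one histogram line: 32 space-separated bucket counters, with
 * bucket boundaries as produced by dasd_profile_counter() above.
 */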
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void dasd_stats_array(struct seq_file *m, unsigned int *array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) for (i = 0; i < 32; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) seq_printf(m, "%u ", array[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) seq_putc(m, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void dasd_stats_seq_print(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct dasd_profile_info *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) seq_printf(m, "start_time %lld.%09ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) data->dasd_sum_times / data->dasd_io_reqs : 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) seq_puts(m, "histogram_sectors ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) dasd_stats_array(m, data->dasd_io_secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) seq_puts(m, "histogram_io_times ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) dasd_stats_array(m, data->dasd_io_times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) seq_puts(m, "histogram_io_times_weighted ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) dasd_stats_array(m, data->dasd_io_timps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) seq_puts(m, "histogram_time_build_to_ssch ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dasd_stats_array(m, data->dasd_io_time1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) seq_puts(m, "histogram_time_ssch_to_irq ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dasd_stats_array(m, data->dasd_io_time2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) dasd_stats_array(m, data->dasd_io_time2ps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) seq_puts(m, "histogram_time_irq_to_end ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) dasd_stats_array(m, data->dasd_io_time3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) seq_puts(m, "histogram_ccw_queue_length ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) dasd_stats_array(m, data->dasd_io_nr_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) seq_puts(m, "histogram_read_sectors ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dasd_stats_array(m, data->dasd_read_secs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) seq_puts(m, "histogram_read_times ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dasd_stats_array(m, data->dasd_read_times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) seq_puts(m, "histogram_read_time_build_to_ssch ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) dasd_stats_array(m, data->dasd_read_time1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) seq_puts(m, "histogram_read_time_ssch_to_irq ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) dasd_stats_array(m, data->dasd_read_time2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) seq_puts(m, "histogram_read_time_irq_to_end ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) dasd_stats_array(m, data->dasd_read_time3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) seq_puts(m, "histogram_read_ccw_queue_length ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dasd_stats_array(m, data->dasd_read_nr_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int dasd_stats_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct dasd_profile *profile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct dasd_profile_info *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) profile = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) spin_lock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) data = profile->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) seq_puts(m, "disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) dasd_stats_seq_print(m, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_unlock_bh(&profile->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static int dasd_stats_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct dasd_profile *profile = inode->i_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return single_open(file, dasd_stats_show, profile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static const struct file_operations dasd_stats_raw_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) .open = dasd_stats_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) .read = seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) .llseek = seq_lseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .release = single_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) .write = dasd_stats_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static void dasd_profile_init(struct dasd_profile *profile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct dentry *base_dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) umode_t mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) struct dentry *pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (!base_dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) profile->dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) profile->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) mode = (S_IRUSR | S_IWUSR | S_IFREG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) pde = debugfs_create_file("statistics", mode, base_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) profile, &dasd_stats_raw_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (pde && !IS_ERR(pde))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) profile->dentry = pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static void dasd_profile_exit(struct dasd_profile *profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dasd_profile_off(profile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) debugfs_remove(profile->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) profile->dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static void dasd_statistics_removeroot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dasd_global_profile_level = DASD_PROFILE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dasd_profile_exit(&dasd_global_profile);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) debugfs_remove(dasd_debugfs_global_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) debugfs_remove(dasd_debugfs_root_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static void dasd_statistics_createroot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct dentry *pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dasd_debugfs_root_entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) pde = debugfs_create_dir("dasd", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!pde || IS_ERR(pde))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) dasd_debugfs_root_entry = pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!pde || IS_ERR(pde))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) dasd_debugfs_global_entry = pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) DBF_EVENT(DBF_ERR, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) "Creation of the dasd debugfs interface failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) dasd_statistics_removeroot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) #define dasd_profile_start(block, cqr, req) do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) #define dasd_profile_end(block, cqr, req) do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static void dasd_statistics_createroot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static void dasd_statistics_removeroot(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int dasd_stats_generic_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) seq_puts(m, "Statistics are not activated in this kernel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static void dasd_profile_init(struct dasd_profile *profile,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct dentry *base_dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static void dasd_profile_exit(struct dasd_profile *profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) int dasd_profile_on(struct dasd_profile *profile)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) #endif /* CONFIG_DASD_PROFILE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
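/*
 * Show the host access list for a device. Only disciplines that implement
 * the hosts_print callback support this; otherwise -EOPNOTSUPP is returned.
 */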
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static int dasd_hosts_show(struct seq_file *m, void *v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) device = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (device->discipline->hosts_print)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) rc = device->discipline->hosts_print(device, m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static void dasd_hosts_exit(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) debugfs_remove(device->hosts_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) device->hosts_dentry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static void dasd_hosts_init(struct dentry *base_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct dentry *pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) umode_t mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (!base_dentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) mode = S_IRUSR | S_IFREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) pde = debugfs_create_file("host_access_list", mode, base_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) device, &dasd_hosts_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (pde && !IS_ERR(pde))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) device->hosts_dentry = pde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
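/*
 * Allocate a ccw request from the device's ccw_chunks memory pool. A single
 * chunk holds, in order: the struct dasd_ccw_req itself (only if the caller
 * did not pass one in), the channel program of cplength ccw1 entries, and
 * datasize bytes of payload; the cqr size is rounded up to an 8-byte
 * boundary via (sizeof(*cqr) + 7L) & -8L.
 */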
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) char *data, *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (cplength > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) size += cplength * sizeof(struct ccw1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (datasize > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) size += datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) size += (sizeof(*cqr) + 7L) & -8L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) spin_lock_irqsave(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) spin_unlock_irqrestore(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (!cqr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) cqr = (void *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) data += (sizeof(*cqr) + 7L) & -8L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) cqr->mem_chunk = chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (cplength > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) cqr->cpaddr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) data += cplength * sizeof(struct ccw1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (datasize > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) cqr->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) memset(cqr->data, 0, datasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) cqr->magic = magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) EXPORT_SYMBOL(dasd_smalloc_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
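/*
 * Like dasd_smalloc_request(), but the request is carved out of the
 * device's ese_chunks pool, which is reserved for ESE (thin-provisioning)
 * handling such as formatting tracks on demand, so that these requests do
 * not compete with normal I/O for the ccw_chunks pool.
 */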
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) int datasize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) int size, cqr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) char *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) cqr_size = (sizeof(*cqr) + 7L) & -8L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) size = cqr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (cplength > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) size += cplength * sizeof(struct ccw1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (datasize > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) size += datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) spin_lock_irqsave(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) cqr = dasd_alloc_chunk(&device->ese_chunks, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) spin_unlock_irqrestore(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (!cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) data = (char *)cqr + cqr_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) cqr->cpaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (cplength > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) cqr->cpaddr = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) data += cplength * sizeof(struct ccw1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) cqr->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (datasize > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) cqr->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) memset(cqr->data, 0, datasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) cqr->magic = magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) EXPORT_SYMBOL(dasd_fmalloc_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) spin_lock_irqsave(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) spin_unlock_irqrestore(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) EXPORT_SYMBOL(dasd_sfree_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) spin_lock_irqsave(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) dasd_free_chunk(&device->ese_chunks, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) spin_unlock_irqrestore(&device->mem_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) EXPORT_SYMBOL(dasd_ffree_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * Check discipline magic in cqr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (cqr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) " dasd_ccw_req 0x%08x magic doesn't match"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) " discipline 0x%08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) cqr->magic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *(unsigned int *) device->discipline->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * Terminate the current i/o and set the request to clear_pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * Timer keeps the device running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * ccw_device_clear can fail if the i/o subsystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * is in a bad state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int dasd_term_IO(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int retries, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) char errorstring[ERRORLENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* Check the cqr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) rc = dasd_check_cqr(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) device = (struct dasd_device *) cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) rc = ccw_device_clear(device->cdev, (long) cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) case 0: /* termination successful */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) cqr->status = DASD_CQR_CLEAR_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) cqr->stopclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) cqr->starttime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) DBF_DEV_EVENT(DBF_DEBUG, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) "terminate cqr %p successful",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) case -ENODEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) DBF_DEV_EVENT(DBF_ERR, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) "device gone, retry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * device not valid so no I/O could be running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * handle CQR as termination successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) cqr->stopclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) cqr->starttime = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* no retries for invalid devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) cqr->retries = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) DBF_DEV_EVENT(DBF_ERR, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) "EINVAL, handle as terminated");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* fake rc to success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* internal error 10 - unknown rc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dev_err(&device->cdev->dev, "An error occurred in the "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) "DASD device driver, reason=%s\n", errorstring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) EXPORT_SYMBOL(dasd_term_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * Start the i/o. This start_IO can fail if the channel is really busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * In that case set up a timer to start the request later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) int dasd_start_IO(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) char errorstring[ERRORLENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* Check the cqr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) rc = dasd_check_cqr(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) cqr->intrc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) device = (struct dasd_device *) cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (((cqr->block &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) "because of stolen lock", cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) cqr->status = DASD_CQR_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) cqr->intrc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (cqr->retries < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* internal error 14 - start_IO ran out of retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) sprintf(errorstring, "14 %p", cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) dev_err(&device->cdev->dev, "An error occurred in the DASD "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) "device driver, reason=%s\n", errorstring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) cqr->status = DASD_CQR_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) cqr->startclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) cqr->starttime = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) cqr->retries--;
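/*
 * Unless this is a path verification request, restrict the request's
 * logical path mask (lpm) to the currently operational paths (opm); if
 * the intersection is empty, retry on all operational paths.
 */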
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) cqr->lpm &= dasd_path_get_opm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (!cqr->lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) cqr->lpm = dasd_path_get_opm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (cqr->cpmode == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) (long) cqr, cqr->lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) rc = ccw_device_start(device->cdev, cqr->cpaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) (long) cqr, cqr->lpm, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) cqr->status = DASD_CQR_IN_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) case -EBUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) "start_IO: device busy, retry later");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* -EACCES indicates that the request used only a subset of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * available paths and all these paths are gone. If the lpm of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * this request was only a subset of the opm (e.g. the ppm) then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * we just do a retry with all available paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * If we already use the full opm, something is amiss, and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * need a full path verification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) "start_IO: selected paths gone (%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) cqr->lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) } else if (cqr->lpm != dasd_path_get_opm(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) cqr->lpm = dasd_path_get_opm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) "start_IO: selected paths gone,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) " retry on all paths");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) "start_IO: all paths in opm gone,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) " do path verification");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) dasd_generic_last_path_gone(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) dasd_path_no_path(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) dasd_path_set_tbvpm(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ccw_device_get_path_mask(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) case -ENODEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) "start_IO: -ENODEV device gone, retry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) "start_IO: -EIO device gone, retry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) /* most likely caused in power management context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) "start_IO: -EINVAL device currently "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) "not accessible");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) /* internal error 11 - unknown rc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) "An error occurred in the DASD device driver, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) "reason=%s\n", errorstring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) cqr->intrc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) EXPORT_SYMBOL(dasd_start_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * Timeout function for dasd devices. This is used for several purposes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * 1) a missing interrupt during normal operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * 2) a delayed start of a request where start_IO failed with -EBUSY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * 3) missing state change interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * DASD_CQR_QUEUED for 2) and 3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static void dasd_device_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) device = from_timer(device, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /* re-activate request queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * Set up a timeout for a device, in jiffies; an expires value of 0 cancels the timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) void dasd_device_set_timer(struct dasd_device *device, int expires)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (expires == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) del_timer(&device->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) mod_timer(&device->timer, jiffies + expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) EXPORT_SYMBOL(dasd_device_set_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * Clear timeout for a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) void dasd_device_clear_timer(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) del_timer(&device->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) EXPORT_SYMBOL(dasd_device_clear_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
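/*
 * A started I/O was killed before it completed (e.g. by ccw_device_clear).
 * Sanity-check the request behind intparm and re-queue it so that it is
 * retried by the device tasklet.
 */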
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static void dasd_handle_killed_request(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) unsigned long intparm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (!intparm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) cqr = (struct dasd_ccw_req *) intparm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (cqr->status != DASD_CQR_IN_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) DBF_EVENT_DEVID(DBF_DEBUG, cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) "invalid status in handle_killed_request: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) "%02x", cqr->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (IS_ERR(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) "unable to get device from cdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (!cqr->startdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) device != cqr->startdev ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) strncmp(cqr->startdev->discipline->ebcname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) (char *) &cqr->magic, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) "invalid device in request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) /* Schedule request to be retried. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) void dasd_generic_handle_state_change(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* First of all start sense subsystem status request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) dasd_eer_snss(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (device->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (device->block->request_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) blk_mq_run_hw_queues(device->block->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
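/*
 * Check whether the irb indicates that High Performance FICON (fCX /
 * transport mode) is no longer available for the device or a path; a
 * non-zero result tells the caller to fall back from HPF.
 */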
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) static int dasd_check_hpf_error(struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return (scsw_tm_is_valid_schxs(&irb->scsw) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
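/*
 * Check whether an interrupt on an ESE (Extent Space Efficient, i.e.
 * thin-provisioned) volume indicates access to an as-yet unformatted
 * track: 'No Record Found', 'File Protected', or an incorrect-length
 * condition.
 */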
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct dasd_device *device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) u8 *sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) device = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (!device || !device->discipline->is_ese)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (!device->discipline->is_ese(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) sense = dasd_get_sense(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (!sense)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) return !!(sense[1] & SNS1_NO_REC_FOUND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) !!(sense[1] & SNS1_FILE_PROTECTED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
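/*
 * Check the sense data for the out-of-space condition of an extent pool:
 * equipment check, permanent error and write inhibited all set, plus
 * sense byte 25 equal to 0x01.
 */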
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static int dasd_ese_oos_cond(u8 *sense)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return sense[0] & SNS0_EQUIPMENT_CHECK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) sense[1] & SNS1_PERM_ERR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) sense[1] & SNS1_WRITE_INHIBITED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) sense[25] == 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * Interrupt handler for "normal" ssch-io based dasd devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct dasd_ccw_req *cqr, *next, *fcqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) unsigned long now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) int nrf_suppressed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) int fp_suppressed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) u8 *sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) int expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) cqr = (struct dasd_ccw_req *) intparm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (IS_ERR(irb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) switch (PTR_ERR(irb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) wake_up(&dasd_flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) "request timed out\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) "unknown error %ld\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) PTR_ERR(irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) dasd_handle_killed_request(cdev, intparm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) now = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) /* check for conditions that should be handled immediately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (!cqr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) scsw_cstat(&irb->scsw) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) memcpy(&cqr->irb, irb, sizeof(*irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* ignore unsolicited interrupts for DIAG discipline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (device->discipline == dasd_diag_discipline_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * In some cases 'File Protected' or 'No Record Found' errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * might be expected and debug log messages for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * corresponding interrupts shouldn't be written then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * Check if either of the corresponding suppress bits is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) sense = dasd_get_sense(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (sense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * The extent pool is probably out of space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * Stop the device and check the exhaustion level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (dasd_ese_oos_cond(sense)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) dasd_generic_space_exhaust(device, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) device->discipline->ext_pool_exhaust(device, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (!(fp_suppressed || nrf_suppressed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) device->discipline->dump_sense_dbf(device, irb, "int");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (device->features & DASD_FEATURE_ERPLOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) device->discipline->dump_sense(device, cqr, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) device->discipline->check_for_device_change(device, cqr, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) /* check for attention message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (!IS_ERR(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) device->discipline->check_attention(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) irb->esw.esw1.lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) device = (struct dasd_device *) cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (!device ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) "invalid device in request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (dasd_ese_needs_format(cqr->block, irb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) device->discipline->ese_read(cqr, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) cqr->status = DASD_CQR_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) cqr->stopclk = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) fcqr = device->discipline->ese_format(device, cqr, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (IS_ERR(fcqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (PTR_ERR(fcqr) == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) cqr->status = DASD_CQR_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * If we can't format now, let the request go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * one extra round. Maybe we can format later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) fcqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) list_add(&fcqr->devlist, &device->ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /* Check for clear pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (cqr->status == DASD_CQR_CLEAR_PENDING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) wake_up(&dasd_flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /* check status - the request might have been killed by dyn detach */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (cqr->status != DASD_CQR_IN_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) "status %02x", dev_name(&cdev->dev), cqr->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) expires = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) scsw_cstat(&irb->scsw) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /* request was completed successfully */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) cqr->status = DASD_CQR_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) cqr->stopclk = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /* Start first request on queue if possible -> fast_io. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (cqr->devlist.next != &device->ccw_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) next = list_entry(cqr->devlist.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) } else { /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /* Check for an HPF error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * call the discipline function to requeue all requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * and disable HPF accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (cqr->cpmode && dasd_check_hpf_error(irb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) device->discipline->handle_hpf_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) device->discipline->handle_hpf_error(device, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * If we don't want complex ERP for this request, then just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * reset it and retry it in the fastpath.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) cqr->retries > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (cqr->lpm == dasd_path_get_opm(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) DBF_DEV_EVENT(DBF_DEBUG, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) "default ERP in fastpath "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) "(%i retries left)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) cqr->retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) cqr->lpm = dasd_path_get_opm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) next = cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) cqr->status = DASD_CQR_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) if (next && (next->status == DASD_CQR_QUEUED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) (!device->stopped)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (device->discipline->start_IO(next) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) expires = next->expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (expires != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) dasd_device_set_timer(device, expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) EXPORT_SYMBOL(dasd_int_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
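/*
 * Unit-check handler called by the common I/O layer. Forward the irb to
 * the discipline's check_for_device_change callback if the device is
 * usable; the request is always retried (UC_TODO_RETRY).
 */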
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) device->state != device->target ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) !device->discipline->check_for_device_change){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (device->discipline->dump_sense_dbf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) device->discipline->dump_sense_dbf(device, irb, "uc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) device->discipline->check_for_device_change(device, NULL, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return UC_TODO_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * If we have an error on a dasd_block layer request then we cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * and return all further requests from the same dasd_block as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) static void __dasd_device_recovery(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct dasd_ccw_req *ref_cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct list_head *l, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * only requeue requests that came from the dasd_block layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (!ref_cqr->block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) list_for_each_safe(l, n, &device->ccw_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) cqr = list_entry(l, struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (cqr->status == DASD_CQR_QUEUED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) ref_cqr->block == cqr->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * Remove those ccw requests from the queue that need to be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * to the upper layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static void __dasd_device_process_ccw_queue(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct list_head *final_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct list_head *l, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /* Process requests with final status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) list_for_each_safe(l, n, &device->ccw_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) cqr = list_entry(l, struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* Skip any non-final request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (cqr->status == DASD_CQR_QUEUED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) cqr->status == DASD_CQR_IN_IO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) cqr->status == DASD_CQR_CLEAR_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (cqr->status == DASD_CQR_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) __dasd_device_recovery(device, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /* Rechain finished requests to final queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) list_move_tail(&cqr->devlist, final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
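/*
 * Translate the interim status of a finished request into its final
 * status (DONE, NEED_ERP or TERMINATED) and invoke the request's
 * callback. Any other status at this point is a driver bug
 * (internal error 12).
 */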
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) static void __dasd_process_cqr(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) char errorstring[ERRORLENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) switch (cqr->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) case DASD_CQR_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) cqr->status = DASD_CQR_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) case DASD_CQR_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) cqr->status = DASD_CQR_NEED_ERP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) case DASD_CQR_CLEARED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) cqr->status = DASD_CQR_TERMINATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* internal error 12 - wrong cqr status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) "An error occurred in the DASD device driver, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) "reason=%s\n", errorstring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (cqr->callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) cqr->callback(cqr, cqr->callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * The cqrs from the final queue are returned to the upper layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * by setting their final status and calling the callback functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static void __dasd_device_process_final_queue(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct list_head *final_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) struct list_head *l, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) list_for_each_safe(l, n, final_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) cqr = list_entry(l, struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) list_del_init(&cqr->devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) block = cqr->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (!block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) __dasd_process_cqr(device, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) spin_lock_bh(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) __dasd_process_cqr(device, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) spin_unlock_bh(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * Take a look at the first request on the ccw queue and check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * if it reached its expire time. If so, terminate the IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static void __dasd_device_check_expire(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (list_empty(&device->ccw_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * IO in safe offline processing should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * run out of retries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) cqr->retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (device->discipline->term_IO(cqr) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) /* Hmpf, try again in 5 sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) "cqr %p timed out (%lus) but cannot be "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) "ended, retrying in 5 s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) cqr, (cqr->expires/HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) cqr->expires += 5*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) dasd_device_set_timer(device, 5*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) "cqr %p timed out (%lus), %i retries "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) "remaining\n", cqr, (cqr->expires/HZ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) cqr->retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * return 1 when device is not eligible for IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) static int __dasd_device_is_unusable(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * The device is being set offline, but it is not a safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * offline, during which I/O must still be allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (device->stopped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (device->stopped & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) /* The device is stopped and this CQR will not change that. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /* CQR is not able to change device to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * operational. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) /* CQR required to get device operational. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * Take a look at the first request on the ccw queue and check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * if it needs to be started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) static void __dasd_device_start_head(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (list_empty(&device->ccw_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (cqr->status != DASD_CQR_QUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) /* if device is not usable return request to upper layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (__dasd_device_is_unusable(device, cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) cqr->intrc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) rc = device->discipline->start_IO(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) dasd_device_set_timer(device, cqr->expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) else if (rc == -EACCES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /* Hmpf, try again in 1/2 sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) dasd_device_set_timer(device, 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
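/*
 * If any channel paths are flagged "to be verified" and the device is
 * not stopped for other reasons, kick off the discipline's path
 * verification; retry in 1/2 sec if it could not be started.
 */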
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) static void __dasd_device_check_path_events(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (!dasd_path_get_tbvpm(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (device->stopped &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) rc = device->discipline->verify_path(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) dasd_path_get_tbvpm(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) dasd_device_set_timer(device, 50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) dasd_path_clear_all_verify(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * Go through all requests on the dasd_device request queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * terminate them on the cdev if necessary, and return them to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * submitting layer via callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * Make sure that all 'submitting layers' still exist when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * this function is called! In other words, when 'device' is a base
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * device, then all block layer requests must have been removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * beforehand via dasd_flush_block_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) int dasd_flush_device_queue(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct dasd_ccw_req *cqr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct list_head flush_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) INIT_LIST_HEAD(&flush_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) /* Check status and move request to flush_queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) switch (cqr->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) case DASD_CQR_IN_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) rc = device->discipline->term_IO(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /* unable to terminate request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) "Flushing the DASD request queue "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) "failed for request %p\n", cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /* stop flush processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) goto finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) case DASD_CQR_QUEUED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) cqr->stopclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) default: /* no need to modify the others */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) list_move_tail(&cqr->devlist, &flush_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) finished:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * After this point all requests must be in state CLEAR_PENDING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * one of the others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) wait_event(dasd_flush_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) (cqr->status != DASD_CQR_CLEAR_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) * Now set each request back to TERMINATED, DONE or NEED_ERP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * and call the callback function of flushed requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) __dasd_device_process_final_queue(device, &flush_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * Acquire the device lock and process queues for the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static void dasd_device_tasklet(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct dasd_device *device = (struct dasd_device *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) struct list_head final_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) atomic_set(&device->tasklet_scheduled, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) INIT_LIST_HEAD(&final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /* Check expire time of first request on the ccw queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) __dasd_device_check_expire(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) /* find final requests on ccw queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) __dasd_device_process_ccw_queue(device, &final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) __dasd_device_check_path_events(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /* Now call the callback function of requests with final status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) __dasd_device_process_final_queue(device, &final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) /* Now check if the head of the ccw queue needs to be started. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) __dasd_device_start_head(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (waitqueue_active(&shutdown_waitq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) wake_up(&shutdown_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * Schedules a call to dasd_device_tasklet over the device tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) void dasd_schedule_device_bh(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) /* Protect against rescheduling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) tasklet_hi_schedule(&device->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) EXPORT_SYMBOL(dasd_schedule_device_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
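/*
 * Set the given stop bits in the device's 'stopped' mask; while any
 * bit is set, normal requests are not started
 * (see __dasd_device_is_unusable).
 */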
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) device->stopped |= bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
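/*
 * Clear the given stop bits and, once no stop bits remain, wake up
 * waiters on generic_waitq.
 */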
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) device->stopped &= ~bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (!device->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) wake_up(&generic_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * Queue a request to the head of the device ccw_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * Start the I/O if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) void dasd_add_request_head(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) list_add(&cqr->devlist, &device->ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /* let the bh start the requests to keep them in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) EXPORT_SYMBOL(dasd_add_request_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * Queue a request to the tail of the device ccw_queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * Start the I/O if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) void dasd_add_request_tail(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) list_add_tail(&cqr->devlist, &device->ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /* let the bh start the requests to keep them in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) EXPORT_SYMBOL(dasd_add_request_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * Wakeup helper for the 'sleep_on' functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) cqr->callback_data = DASD_SLEEPON_END_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) wake_up(&generic_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
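/*
 * Test, under the ccw device lock, whether dasd_wakeup_cb has marked
 * the request as completed (DASD_SLEEPON_END_TAG).
 */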
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * checks if error recovery is necessary; returns 1 if yes, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) dasd_erp_fn_t erp_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (cqr->status == DASD_CQR_FILLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (cqr->status == DASD_CQR_TERMINATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) device->discipline->handle_terminated_request(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (cqr->status == DASD_CQR_NEED_ERP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) erp_fn = device->discipline->erp_action(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) erp_fn(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (cqr->status == DASD_CQR_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) dasd_log_sense(cqr, &cqr->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (cqr->refers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) __dasd_process_erp(device, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
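/*
 * Loop condition for the sleep_on functions: with ERP, keep going until
 * the ERP chain is unwound and the request is DONE or FAILED; without
 * ERP, only a FILLED request still needs processing.
 */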
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (cqr->refers) /* erp is not done yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) return ((cqr->status != DASD_CQR_DONE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) (cqr->status != DASD_CQR_FAILED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return (cqr->status == DASD_CQR_FILLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
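/*
 * Common worker for the dasd_sleep_on variants: start the request (and
 * any ERP requests derived from it) on its start device and wait,
 * interruptibly if requested, until a final status is reached.
 */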
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) struct list_head ccw_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) INIT_LIST_HEAD(&ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) maincqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) device = maincqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) list_add(&maincqr->blocklist, &ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) cqr = list_first_entry(&ccw_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) struct dasd_ccw_req, blocklist)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (__dasd_sleep_on_erp(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) if (cqr->status != DASD_CQR_FILLED) /* could be failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) cqr->intrc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) /* Non-temporary stop condition will trigger fail fast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (device->stopped & ~DASD_STOPPED_PENDING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) (!dasd_eer_enabled(device))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) cqr->intrc = -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * Don't try to start requests if the device is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * offline processing; it might wait forever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) cqr->intrc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * Don't try to start requests while the device is stopped,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) * except for path verification requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (interruptible) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) rc = wait_event_interruptible(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) generic_waitq, !(device->stopped));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (rc == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) maincqr->intrc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) wait_event(generic_waitq, !(device->stopped));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (!cqr->callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) cqr->callback = dasd_wakeup_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) cqr->callback_data = DASD_SLEEPON_START_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) dasd_add_request_tail(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (interruptible) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) rc = wait_event_interruptible(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) generic_waitq, _wait_for_wakeup(cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (rc == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) dasd_cancel_req(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) /* wait (non-interruptible) for final status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) wait_event(generic_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) _wait_for_wakeup(cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) maincqr->intrc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) wait_event(generic_waitq, _wait_for_wakeup(cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) maincqr->endclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if ((maincqr->status != DASD_CQR_DONE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) (maincqr->intrc != -ERESTARTSYS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) dasd_log_sense(maincqr, &maincqr->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) if (maincqr->status == DASD_CQR_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) else if (maincqr->intrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) rc = maincqr->intrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
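
/*
 * Illustrative sketch (not compiled; ERP, stop handling and the request
 * chain walk are omitted): for a single request, the synchronous wait
 * that _dasd_sleep_on() implements reduces to the following steps.
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *	rc = (cqr->status == DASD_CQR_DONE) ? 0 :
 *	     (cqr->intrc ? cqr->intrc : -EIO);
 */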
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) list_for_each_entry(cqr, ccw_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (cqr->callback_data != DASD_SLEEPON_END_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) struct dasd_ccw_req *cqr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) u8 *sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (cqr->status != DASD_CQR_FILLED) /* could be failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) cqr->intrc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) /* Non-temporary stop condition will trigger fail fast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (device->stopped & ~DASD_STOPPED_PENDING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) !dasd_eer_enabled(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) cqr->intrc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) /* Don't try to start requests if the device is stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (interruptible) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) rc = wait_event_interruptible(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) generic_waitq, !device->stopped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) if (rc == -ERESTARTSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) cqr->intrc = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) wait_event(generic_waitq, !(device->stopped));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (!cqr->callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) cqr->callback = dasd_wakeup_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) cqr->callback_data = DASD_SLEEPON_START_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) dasd_add_request_tail(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) * In some cases the 'File Protected' or 'Incorrect Length'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) * error might be expected and error recovery would be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) * unnecessary in these cases. Check if the corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) * suppress bit is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) sense = dasd_get_sense(&cqr->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (sense && sense[1] & SNS1_FILE_PROTECTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * For alias devices, simplify error recovery and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * return to the upper layer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) * do not skip ERP requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (cqr->startdev != cqr->basedev && !cqr->refers &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) (cqr->status == DASD_CQR_TERMINATED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) cqr->status == DASD_CQR_NEED_ERP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) /* normal recovery for basedev IO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (__dasd_sleep_on_erp(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) /* handle erp first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * Queue a request to the tail of the device ccw_queue and wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) * its completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) int dasd_sleep_on(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) return _dasd_sleep_on(cqr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) EXPORT_SYMBOL(dasd_sleep_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * Start requests from a ccw_queue and wait for their completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) int dasd_sleep_on_queue(struct list_head *ccw_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) return _dasd_sleep_on_queue(ccw_queue, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) EXPORT_SYMBOL(dasd_sleep_on_queue);
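
/*
 * Usage sketch (illustrative only; assumes the requests were already
 * built, e.g. by a discipline's build routine):
 *
 *	LIST_HEAD(ccw_queue);
 *
 *	list_add_tail(&cqr->blocklist, &ccw_queue);
 *	rc = dasd_sleep_on_queue(&ccw_queue);
 *
 * On return each request on the list has reached a final status or was
 * handed over to ERP; callers inspect cqr->status afterwards.
 */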
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * Start requests from a ccw_queue and wait interruptible for their completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return _dasd_sleep_on_queue(ccw_queue, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * Queue a request to the tail of the device ccw_queue and wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) * interruptibly for its completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) return _dasd_sleep_on(cqr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) EXPORT_SYMBOL(dasd_sleep_on_interruptible);
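
/*
 * Illustrative use of the interruptible variant: a signal aborts the
 * wait, the request is cancelled internally (see _dasd_sleep_on above)
 * and -ERESTARTSYS is returned so the system call can be restarted.
 *
 *	rc = dasd_sleep_on_interruptible(cqr);
 *	if (rc == -ERESTARTSYS)
 *		return rc;
 */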
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) * for ECKD devices) the currently running request has to be terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) * and put back to status queued before the special request is added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * to the head of the queue. The special request is then waited on normally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) static inline int _dasd_term_running_cqr(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (list_empty(&device->ccw_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) rc = device->discipline->term_IO(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) * CQR terminated because a more important request is pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) * Undo the decrement of the retry counter, because this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) * not an error case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) cqr->retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) cqr->intrc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) rc = _dasd_term_running_cqr(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) cqr->callback = dasd_wakeup_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) cqr->callback_data = DASD_SLEEPON_START_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) * Add the new request as second on the queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) * the terminated cqr needs to be finished first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) list_add(&cqr->devlist, device->ccw_queue.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) /* let the bh start the request to keep them in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) wait_event(generic_waitq, _wait_for_wakeup(cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if (cqr->status == DASD_CQR_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) else if (cqr->intrc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) rc = cqr->intrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /* kick tasklets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (device->block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) EXPORT_SYMBOL(dasd_sleep_on_immediatly);
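
/*
 * Typical use (illustrative, request construction omitted): disciplines
 * issue urgent requests such as an ECKD "steal lock" this way so they
 * overtake the currently running request. The ALLOW_SLOCK flag shown
 * here is only required when the device lock was stolen:
 *
 *	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 *	rc = dasd_sleep_on_immediatly(cqr);
 */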
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * Cancels a request that was started with one of the dasd_sleep_on*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * functions. This is useful for timing out requests. The request will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * be terminated if it is currently in I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * Returns 0 if request termination was successful, or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * negative error code if termination failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * Cancellation of a request is an asynchronous operation! The calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * function has to wait until the request is properly returned via callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) struct dasd_device *device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) switch (cqr->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) case DASD_CQR_QUEUED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) /* request was not started - just set to cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) cqr->status = DASD_CQR_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) case DASD_CQR_IN_IO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /* request in IO - terminate IO and release again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) rc = device->discipline->term_IO(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) "Cancelling request %p failed with rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) cqr, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) cqr->stopclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) default: /* already finished or clear pending - do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) int dasd_cancel_req(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) struct dasd_device *device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) rc = __dasd_cancel_req(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
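
/*
 * Illustrative timeout pattern (not part of the driver): since
 * cancellation is asynchronous, a caller still has to wait for the
 * final callback after dasd_cancel_req() returns, for example:
 *
 *	if (!wait_event_timeout(generic_waitq, _wait_for_wakeup(cqr), HZ)) {
 *		dasd_cancel_req(cqr);
 *		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *	}
 */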
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * SECTION: Operations of the dasd_block layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * Timeout function for dasd_block. This is used when the block layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) * is waiting for something that may not come reliably (e.g. a state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * change interrupt).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) static void dasd_block_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) block = from_timer(block, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /* re-activate request queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) dasd_schedule_block_bh(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) blk_mq_run_hw_queues(block->request_queue, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) * Set up a timeout for a dasd_block, in jiffies (an expiry of 0 deletes the timer).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) void dasd_block_set_timer(struct dasd_block *block, int expires)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if (expires == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) del_timer(&block->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) mod_timer(&block->timer, jiffies + expires);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) EXPORT_SYMBOL(dasd_block_set_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) * Clear timeout for a dasd_block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) void dasd_block_clear_timer(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) del_timer(&block->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) EXPORT_SYMBOL(dasd_block_clear_timer);
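
/*
 * Example (illustrative): a caller that stops the device while waiting
 * for an interrupt that may never arrive arms the block timer so that
 * dasd_block_timeout() above lifts the stop bit after one second. The
 * stop bits are normally manipulated under the ccw device lock, which
 * is omitted here.
 *
 *	dasd_device_set_stop_bits(block->base, DASD_STOPPED_PENDING);
 *	dasd_block_set_timer(block, HZ);
 */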
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * Process finished error recovery ccw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) static void __dasd_process_erp(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) dasd_erp_fn_t erp_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (cqr->status == DASD_CQR_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) erp_fn = device->discipline->erp_postaction(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) erp_fn(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) blk_status_t error = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) unsigned int proc_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) req = (struct request *) cqr->callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) dasd_profile_end(cqr->block, cqr, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) proc_bytes = cqr->proc_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) status = cqr->block->base->discipline->free_cp(cqr, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) error = errno_to_blk_status(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) else if (status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) switch (cqr->intrc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) case -EPERM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) error = BLK_STS_NEXUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) case -ENOLINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) error = BLK_STS_TRANSPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) error = BLK_STS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) error = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) * We need to take care of -ETIMEDOUT errors here since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * complete callback does not get called in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) * Handle all errors here and avoid additional code to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) * transfer the error value to the complete callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) blk_mq_end_request(req, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) blk_mq_run_hw_queues(req->q, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * Partially completed requests can happen with ESE devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * During a read we might have gotten an NRF error and have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * complete the request partially.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) if (proc_bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) blk_update_request(req, BLK_STS_OK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) blk_rq_bytes(req) - proc_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) blk_mq_requeue_request(req, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) } else if (likely(!blk_should_fake_timeout(req->q))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) blk_mq_complete_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) * Process ccw request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) static void __dasd_process_block_ccw_queue(struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) struct list_head *final_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct list_head *l, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) dasd_erp_fn_t erp_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) struct dasd_device *base = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) /* Process requests with final status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) list_for_each_safe(l, n, &block->ccw_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) cqr = list_entry(l, struct dasd_ccw_req, blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (cqr->status != DASD_CQR_DONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) cqr->status != DASD_CQR_FAILED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) cqr->status != DASD_CQR_NEED_ERP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) cqr->status != DASD_CQR_TERMINATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (cqr->status == DASD_CQR_TERMINATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) base->discipline->handle_terminated_request(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) /* Process requests that may be recovered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) if (cqr->status == DASD_CQR_NEED_ERP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) erp_fn = base->discipline->erp_action(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (IS_ERR(erp_fn(cqr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) /* log sense for fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (cqr->status == DASD_CQR_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) dasd_log_sense(cqr, &cqr->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /* First of all call extended error reporting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) if (dasd_eer_enabled(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) cqr->status == DASD_CQR_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) /* restart request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) cqr->retries = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) /* Process finished ERP request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) if (cqr->refers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) __dasd_process_erp(base, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /* Rechain finished requests to final queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) cqr->endclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) list_move_tail(&cqr->blocklist, final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) dasd_schedule_block_bh(cqr->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) static void __dasd_block_start_head(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (list_empty(&block->ccw_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /* We always begin with the first request on the queue, as some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) * of the previously started requests have to be enqueued on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) * dasd_device again for error recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (cqr->status != DASD_CQR_FILLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) cqr->intrc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) dasd_schedule_block_bh(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) /* Non-temporary stop condition will trigger fail fast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) if (block->base->stopped & ~DASD_STOPPED_PENDING &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) (!dasd_eer_enabled(block->base))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) cqr->intrc = -ENOLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) dasd_schedule_block_bh(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) /* Don't try to start requests if device is stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (block->base->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) /* just a fail-safe check, should not happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) if (!cqr->startdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) cqr->startdev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) /* make sure that the requests we submit find their way back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) cqr->callback = dasd_return_cqr_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) dasd_add_request_tail(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) * Central dasd_block layer routine. Takes requests from the generic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) * block layer request queue, creates ccw requests, enqueues them on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * a dasd_device and processes ccw requests that have been returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) static void dasd_block_tasklet(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) struct dasd_block *block = (struct dasd_block *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) struct list_head final_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) struct list_head *l, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) struct dasd_queue *dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) atomic_set(&block->tasklet_scheduled, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) INIT_LIST_HEAD(&final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) spin_lock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /* Finish off requests on ccw queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) __dasd_process_block_ccw_queue(block, &final_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) spin_unlock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) /* Now call the callback function of requests with final status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) list_for_each_safe(l, n, &final_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) cqr = list_entry(l, struct dasd_ccw_req, blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) dq = cqr->dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) spin_lock_irq(&dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) list_del_init(&cqr->blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) __dasd_cleanup_cqr(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) spin_unlock_irq(&dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) spin_lock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) /* Now check if the head of the ccw queue needs to be started. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) __dasd_block_start_head(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) spin_unlock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if (waitqueue_active(&shutdown_waitq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) wake_up(&shutdown_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) dasd_put_device(block->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) wake_up(&dasd_flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) * Requeue a request to the block layer request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) * This only works for block requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct dasd_block *block = cqr->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (!block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) * If the request is an ERP request there is nothing to requeue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) * This will be done with the remaining original request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (cqr->refers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) spin_lock_irq(&cqr->dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) req = (struct request *) cqr->callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) blk_mq_requeue_request(req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) spin_unlock_irq(&cqr->dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) * Go through all requests on the dasd_block request queue, cancel them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) * on the respective dasd_device, and return them to the generic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) * block layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) static int dasd_flush_block_queue(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) struct dasd_ccw_req *cqr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) struct list_head flush_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) INIT_LIST_HEAD(&flush_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) spin_lock_bh(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) /* if this request is currently owned by a dasd_device, cancel it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (cqr->status >= DASD_CQR_QUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) rc = dasd_cancel_req(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) /* Rechain request (including erp chain) so it won't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) * touched by the dasd_block_tasklet anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) * Replace the callback so we notice when the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) * is returned from the dasd_device layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) cqr->callback = _dasd_wake_block_flush_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) list_move_tail(&cqr->blocklist, &flush_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) if (i > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) /* moved more than one request - need to restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) spin_unlock_bh(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) /* Now call the callback function of flushed requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) restart_cb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) /* Process finished ERP request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if (cqr->refers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) spin_lock_bh(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) __dasd_process_erp(block->base, cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) spin_unlock_bh(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) /* restart the list_for_each loop since __dasd_process_erp()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) * might remove multiple elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) goto restart_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) /* call the callback function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) spin_lock_irqsave(&cqr->dq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) cqr->endclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) list_del_init(&cqr->blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) __dasd_cleanup_cqr(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) spin_unlock_irqrestore(&cqr->dq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) * Schedules a run of dasd_block_tasklet via the block's tasklet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) void dasd_schedule_block_bh(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) /* Protect against rescheduling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) /* life cycle of block is bound to its base device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) dasd_get_device(block->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) tasklet_hi_schedule(&block->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) EXPORT_SYMBOL(dasd_schedule_block_bh);
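
/*
 * Illustrative caller pattern: code that clears a stop condition kicks
 * the block bottom half afterwards so that queued requests are started,
 * mirroring dasd_block_timeout() above:
 *
 *	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
 *	dasd_schedule_block_bh(block);
 */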
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) * SECTION: external block device operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) * (request queue handling, open, release, etc.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) * DASD request queue function. Called by the blk-mq layer as the queue_rq callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) const struct blk_mq_queue_data *qd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct dasd_block *block = hctx->queue->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct dasd_queue *dq = hctx->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct request *req = qd->rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) struct dasd_device *basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) blk_status_t rc = BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) basedev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) spin_lock_irq(&dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (basedev->state < DASD_STATE_READY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) "device not ready for request %p", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) rc = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) * If the device is stopped, do not fetch new requests,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) * unless failfast is active, which will let requests fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) * immediately in __dasd_block_start_head()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) "device stopped request %p", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) rc = BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) if (basedev->features & DASD_FEATURE_READONLY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) rq_data_dir(req) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) "Rejecting write request %p", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) rc = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) (basedev->features & DASD_FEATURE_FAILFAST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) blk_noretry_request(req))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) "Rejecting failfast request %p", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) rc = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) cqr = basedev->discipline->build_cp(basedev, block, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (PTR_ERR(cqr) == -EBUSY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) PTR_ERR(cqr) == -ENOMEM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) PTR_ERR(cqr) == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) rc = BLK_STS_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) "CCW creation failed (rc=%ld) on request %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) PTR_ERR(cqr), req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) rc = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) * Note: callback is set to dasd_return_cqr_cb in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) * __dasd_block_start_head to cover erp requests as well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) cqr->callback_data = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) cqr->dq = dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) blk_mq_start_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) spin_lock(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) list_add_tail(&cqr->blocklist, &block->ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) INIT_LIST_HEAD(&cqr->devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) dasd_profile_start(block, cqr, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) dasd_schedule_block_bh(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) spin_unlock(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) spin_unlock_irq(&dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) * Block timeout callback, called from the block layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) * BLK_EH_RESET_TIMER if the request should be left running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) * BLK_EH_DONE if the request is handled or terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) * by the driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) struct dasd_block *block = req->q->queuedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) cqr = blk_mq_rq_to_pdu(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (!cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return BLK_EH_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) spin_lock_irqsave(&cqr->dq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) device = cqr->startdev ? cqr->startdev : block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if (!device->blk_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) spin_unlock_irqrestore(&cqr->dq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) return BLK_EH_RESET_TIMER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) " dasd_times_out cqr %p status %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) cqr, cqr->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) spin_lock(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) cqr->retries = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) cqr->intrc = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) if (cqr->status >= DASD_CQR_QUEUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) rc = __dasd_cancel_req(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) } else if (cqr->status == DASD_CQR_FILLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) cqr->status == DASD_CQR_NEED_ERP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) cqr->status = DASD_CQR_TERMINATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) } else if (cqr->status == DASD_CQR_IN_ERP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) list_for_each_entry_safe(searchcqr, nextcqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) &block->ccw_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) tmpcqr = searchcqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) while (tmpcqr->refers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) tmpcqr = tmpcqr->refers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) if (tmpcqr != cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) /* searchcqr is an ERP request for cqr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) searchcqr->retries = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) searchcqr->intrc = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (searchcqr->status >= DASD_CQR_QUEUED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) rc = __dasd_cancel_req(searchcqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) } else if ((searchcqr->status == DASD_CQR_FILLED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) (searchcqr->status == DASD_CQR_NEED_ERP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) searchcqr->status = DASD_CQR_TERMINATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) } else if (searchcqr->status == DASD_CQR_IN_ERP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) * Shouldn't happen; the most recent ERP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) * request is at the front of the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) dasd_schedule_block_bh(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) spin_unlock(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) spin_unlock_irqrestore(&cqr->dq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) }
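
/*
 * Illustrative sketch (assumption, not part of the driver): the
 * per-request timeout that eventually triggers dasd_times_out() is
 * armed on the request queue, for a device timeout in seconds,
 * roughly as:
 *
 *	blk_queue_rq_timeout(block->request_queue, device->blk_timeout * HZ);
 *
 * With blk_timeout == 0 the handler above only re-arms the timer
 * (BLK_EH_RESET_TIMER), so such requests are never timed out by DASD.
 */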
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (!dq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) spin_lock_init(&dq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) hctx->driver_data = dq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) kfree(hctx->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) hctx->driver_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) static void dasd_request_done(struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) blk_mq_end_request(req, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) blk_mq_run_hw_queues(req->q, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) static struct blk_mq_ops dasd_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) .queue_rq = do_dasd_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) .complete = dasd_request_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) .timeout = dasd_times_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) .init_hctx = dasd_init_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) .exit_hctx = dasd_exit_hctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) * Allocate and initialize request queue and default I/O scheduler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) static int dasd_alloc_queue(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) block->tag_set.ops = &dasd_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) block->tag_set.nr_hw_queues = nr_hw_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) block->tag_set.queue_depth = queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) block->tag_set.numa_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) rc = blk_mq_alloc_tag_set(&block->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) block->request_queue = blk_mq_init_queue(&block->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (IS_ERR(block->request_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) return PTR_ERR(block->request_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) block->request_queue->queuedata = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) }
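
/*
 * Illustrative sketch (assumed names): the queuedata set above and the
 * per-hctx dasd_queue allocated in dasd_init_hctx() are how the blk-mq
 * callbacks find their way back to the DASD objects, e.g.:
 *
 *	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					     const struct blk_mq_queue_data *qd)
 *	{
 *		struct dasd_block *block = hctx->queue->queuedata;
 *		struct dasd_queue *dq = hctx->driver_data;
 *		...
 *	}
 */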
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) * Deactivate and free request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) static void dasd_free_queue(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (block->request_queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) blk_cleanup_queue(block->request_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) blk_mq_free_tag_set(&block->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) block->request_queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) static int dasd_open(struct block_device *bdev, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) struct dasd_device *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) base = dasd_device_from_gendisk(bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) atomic_inc(&base->block->open_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (!try_module_get(base->discipline->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) if (dasd_probeonly) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) dev_info(&base->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) "Accessing the DASD failed because it is in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) "probeonly mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) rc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) if (base->state <= DASD_STATE_BASIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) DBF_DEV_EVENT(DBF_ERR, base, " %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) " Cannot open unrecognized device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) if ((mode & FMODE_WRITE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) (base->features & DASD_FEATURE_READONLY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) rc = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) dasd_put_device(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) module_put(base->discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) atomic_dec(&base->block->open_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) dasd_put_device(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) static void dasd_release(struct gendisk *disk, fmode_t mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) struct dasd_device *base = dasd_device_from_gendisk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) atomic_dec(&base->block->open_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) module_put(base->discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) dasd_put_device(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) * Return disk geometry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) struct dasd_device *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) base = dasd_device_from_gendisk(bdev->bd_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) if (!base->discipline ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) !base->discipline->fill_geometry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) dasd_put_device(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) base->discipline->fill_geometry(base->block, geo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) dasd_put_device(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) }
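
/*
 * Illustrative sketch (assumed user space code): the geometry filled in
 * above is queried through the standard HDIO_GETGEO ioctl, e.g. with
 * error handling omitted:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hdreg.h>
 *
 *	struct hd_geometry geo;
 *	int fd = open("/dev/dasda", O_RDONLY);
 *
 *	ioctl(fd, HDIO_GETGEO, &geo);
 *	printf("%u heads, %u sectors, %u cylinders, start %lu\n",
 *	       geo.heads, geo.sectors, geo.cylinders, geo.start);
 */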
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) const struct block_device_operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) dasd_device_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) .open = dasd_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) .release = dasd_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) .ioctl = dasd_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) .compat_ioctl = dasd_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) .getgeo = dasd_getgeo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) /*******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * end of block device operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) dasd_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) dasd_proc_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) dasd_eer_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) kmem_cache_destroy(dasd_page_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) dasd_page_cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) dasd_gendisk_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) dasd_devmap_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) if (dasd_debug_area != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) debug_unregister(dasd_debug_area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) dasd_debug_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) dasd_statistics_removeroot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) * SECTION: common functions for ccw_driver use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) * Is the device read-only?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) * Note that this function does not report the setting of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) * readonly device attribute, but rather how the device is configured in z/VM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) int dasd_device_is_ro(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) struct ccw_dev_id dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) struct diag210 diag_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) if (!MACHINE_IS_VM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) ccw_device_get_id(device->cdev, &dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) memset(&diag_data, 0, sizeof(diag_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) diag_data.vrdcdvno = dev_id.devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) diag_data.vrdclen = sizeof(diag_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) rc = diag210(&diag_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) if (rc == 0 || rc == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) return diag_data.vrdcvfla & 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) dev_id.devno, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) EXPORT_SYMBOL_GPL(dasd_device_is_ro);
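
/*
 * Illustrative sketch (assumption): a discipline would typically use
 * this helper during device setup to mirror the z/VM setting, e.g.:
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 */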
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) struct ccw_device *cdev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) ret = ccw_device_set_online(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) pr_warn("%s: Setting the DASD online failed with rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) dev_name(&cdev->dev), ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) * Initial attempt at a probe function. This can be simplified once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) * the other detection code is gone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) int dasd_generic_probe(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) struct dasd_discipline *discipline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) ret = dasd_add_sysfs_files(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) "dasd_generic_probe: could not add "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) "sysfs entries");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) cdev->handler = &dasd_int_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) * Automatically online either all dasd devices (dasd_autodetect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) * or all devices specified with dasd= parameters during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) * initial probe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) async_schedule(dasd_generic_auto_online, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) EXPORT_SYMBOL_GPL(dasd_generic_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) void dasd_generic_free_discipline(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /* Forget the discipline information. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) if (device->discipline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) if (device->discipline->uncheck_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) device->discipline->uncheck_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) module_put(device->discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) device->discipline = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) if (device->base_discipline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) module_put(device->base_discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) device->base_discipline = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) * This will one day be called from a global not_oper handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) * It is also used by driver_unregister during module unload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) void dasd_generic_remove(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) device = dasd_device_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) if (IS_ERR(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) dasd_remove_sysfs_files(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) /* Already doing offline processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) dasd_remove_sysfs_files(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) * This device is removed unconditionally. Set the offline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) * flag to prevent dasd_open from opening it while it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) * not quite shut down yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) dasd_set_target_state(device, DASD_STATE_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) cdev->handler = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) /* dasd_delete_device destroys the device reference. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) block = device->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) * The life cycle of the block is bound to the device, so delete it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) * after the device has been safely removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) dasd_free_block(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) dasd_remove_sysfs_files(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) EXPORT_SYMBOL_GPL(dasd_generic_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) * the device is detected for the first time and is supposed to be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) * or the user has started activation through sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) int dasd_generic_set_online(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) struct dasd_discipline *base_discipline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) struct dasd_discipline *discipline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) /* first online clears initial online feature flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) device = dasd_create_device(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) return PTR_ERR(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) discipline = base_discipline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) if (device->features & DASD_FEATURE_USEDIAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (!dasd_diag_discipline_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) /* Try to load the required module. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) rc = request_module(DASD_DIAG_MOD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) pr_warn("%s Setting the DASD online failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) "because the required module %s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) "could not be loaded (rc=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) dev_name(&cdev->dev), DASD_DIAG_MOD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) /* Module init could have failed, so check again here after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) * request_module(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) if (!dasd_diag_discipline_pointer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) discipline = dasd_diag_discipline_pointer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) if (!try_module_get(base_discipline->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) if (!try_module_get(discipline->owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) module_put(base_discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) device->base_discipline = base_discipline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) device->discipline = discipline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) /* check_device will allocate block device if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) rc = discipline->check_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) dev_name(&cdev->dev), discipline->name, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) module_put(discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) module_put(base_discipline->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) dasd_set_target_state(device, DASD_STATE_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) if (device->state <= DASD_STATE_KNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) dasd_set_target_state(device, DASD_STATE_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) if (device->block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) dasd_free_block(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) pr_debug("dasd_generic device %s found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) wait_event(dasd_init_waitq, _wait_for_device(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) EXPORT_SYMBOL_GPL(dasd_generic_set_online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) int dasd_generic_set_offline(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) int max_count, open_count, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) if (IS_ERR(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) return PTR_ERR(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) * We must make sure that this device is currently not in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) * The open_count is increased for every opener; that includes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) * the blkdev_get in dasd_scan_partitions. We are only interested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) * in the other openers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) if (device->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) max_count = device->block->bdev ? 0 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) open_count = atomic_read(&device->block->open_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if (open_count > max_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (open_count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) pr_warn("%s: The DASD cannot be set offline with open count %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) dev_name(&cdev->dev), open_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) pr_warn("%s: The DASD cannot be set offline while it is in use\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) * Test if the offline processing is already running and exit if so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) * If a safe offline is being processed, this can only be a normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) * offline that should be able to overtake the safe offline and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) * cancel any I/O we do not want to wait for any longer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) set_bit(DASD_FLAG_OFFLINE, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) * If safe_offline is requested, set the safe_offline_running flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) * and clear safe_offline so that a subsequent call to normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) * offline can overtake the safe_offline processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) /* need to unlock here to wait for outstanding I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) * If we want to set the device safe offline, all I/O operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) * should be finished before continuing the offline process,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) * so sync the bdev first and then wait for our queues to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) * become empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) if (device->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) rc = fsync_bdev(device->block->bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) goto interrupted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) rc = wait_event_interruptible(shutdown_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) _wait_for_empty_queues(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) goto interrupted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) * Check if a normal offline process overtook the safe offline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) * processing. In this case simply do nothing besides returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) * that we got interrupted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) * otherwise mark safe offline as no longer running and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) * continue with normal offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) rc = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) dasd_set_target_state(device, DASD_STATE_NEW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) /* dasd_delete_device destroys the device reference. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) block = device->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) dasd_delete_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) * The life cycle of the block is bound to the device, so delete it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) * after the device has been safely removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) if (block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) dasd_free_block(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) interrupted:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) /* interrupted by signal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) clear_bit(DASD_FLAG_OFFLINE, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
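
/*
 * Usage note (hypothetical bus ID): online/offline processing is
 * normally driven through the CCW device's sysfs attributes, e.g.
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *
 * and, for the safe variant handled above,
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/safe_offline
 */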
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) int dasd_generic_last_path_gone(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) "No operational channel path is left for the device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) /* First of all call extended error reporting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) dasd_eer_write(device, NULL, DASD_EER_NOPATH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) if (device->state < DASD_STATE_BASIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) /* Device is active. We want to keep it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) list_for_each_entry(cqr, &device->ccw_queue, devlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) if ((cqr->status == DASD_CQR_IN_IO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) (cqr->status == DASD_CQR_CLEAR_PENDING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) cqr->retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) int dasd_generic_path_operational(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) dev_info(&device->cdev->dev, "A channel path to the device has become "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) "operational\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) if (device->stopped & DASD_UNRESUMED_PM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) dasd_restore_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (device->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) if (device->block->request_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) blk_mq_run_hw_queues(device->block->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) if (!device->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) wake_up(&generic_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) int dasd_generic_notify(struct ccw_device *cdev, int event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) case CIO_GONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) case CIO_BOXED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) case CIO_NO_PATH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) dasd_path_no_path(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) ret = dasd_generic_last_path_gone(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) case CIO_OPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) if (dasd_path_get_opm(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) ret = dasd_generic_path_operational(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) EXPORT_SYMBOL_GPL(dasd_generic_notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) int chp, oldopm, hpfpm, ifccpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) device = dasd_device_from_cdev_locked(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) oldopm = dasd_path_get_opm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) for (chp = 0; chp < 8; chp++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) if (path_event[chp] & PE_PATH_GONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) dasd_path_notoper(device, chp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) if (path_event[chp] & PE_PATH_AVAILABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) dasd_path_available(device, chp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) if (!dasd_path_is_operational(device, chp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) !dasd_path_need_verify(device, chp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) * We cannot establish a pathgroup on an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) * unavailable path, so trigger a path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) * verification first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) dasd_path_available(device, chp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) "Pathgroup re-established\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) if (device->discipline->kick_validate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) device->discipline->kick_validate(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) hpfpm = dasd_path_get_hpfpm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) ifccpm = dasd_path_get_ifccpm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) if (!dasd_path_get_opm(device) && hpfpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) * The device has no operational paths, but at least one path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) * is disabled due to HPF errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) * Disable HPF entirely and use the path(s) again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) if (device->discipline->disable_hpf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) device->discipline->disable_hpf(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) dasd_path_set_tbvpm(device, hpfpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) dasd_schedule_requeue(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) } else if (!dasd_path_get_opm(device) && ifccpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) * The device has no operational paths, but at least one path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) * is disabled due to IFCC errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) * Trigger path verification on the paths with IFCC errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) dasd_path_set_tbvpm(device, ifccpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) "No verified channel paths remain for the device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) "%s", "last verified path gone");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) dasd_eer_write(device, NULL, DASD_EER_NOPATH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) dasd_device_set_stop_bits(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) DASD_STOPPED_DC_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) EXPORT_SYMBOL_GPL(dasd_generic_path_event);
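
/*
 * Illustrative sketch (assumed names, mirroring what the discipline
 * modules do): the dasd_generic_* entry points above are wired into a
 * struct ccw_driver roughly like this:
 *
 *	static struct ccw_driver example_ccw_driver = {
 *		.driver = {
 *			.name	= "dasd-example",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids	     = example_ids,
 *		.probe	     = example_probe,      (calls dasd_generic_probe)
 *		.remove	     = dasd_generic_remove,
 *		.set_online  = example_set_online, (calls dasd_generic_set_online)
 *		.set_offline = dasd_generic_set_offline,
 *		.notify	     = dasd_generic_notify,
 *		.path_event  = dasd_generic_path_event,
 *	};
 */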
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) if (!dasd_path_get_opm(device) && lpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) dasd_path_set_opm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) dasd_generic_path_operational(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) dasd_path_add_opm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) void dasd_generic_space_exhaust(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) dasd_eer_write(device, NULL, DASD_EER_NOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) if (device->state < DASD_STATE_BASIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) if (cqr->status == DASD_CQR_IN_IO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) cqr->status == DASD_CQR_CLEAR_PENDING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) cqr->status = DASD_CQR_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) cqr->retries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) dasd_device_clear_timer(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) void dasd_generic_space_avail(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) dev_info(&device->cdev->dev, "Extent pool space is available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) if (device->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (device->block->request_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) blk_mq_run_hw_queues(device->block->request_queue, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) if (!device->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) wake_up(&generic_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
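
/*
 * Illustrative pairing (a sketch; the sense checks below are
 * hypothetical helpers): a discipline parks the device when the
 * extent pool runs out of space and releases it again once space
 * becomes available:
 *
 *	if (sense_indicates_no_space(sense))		// hypothetical
 *		dasd_generic_space_exhaust(device, cqr);
 *	...
 *	if (sense_indicates_space_avail(sense))		// hypothetical
 *		dasd_generic_space_avail(device);
 */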
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) * terminate active requests and requeue them to the block layer if possible; internal requests remain on the device queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) static int dasd_generic_requeue_all_requests(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) struct list_head requeue_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) struct dasd_ccw_req *cqr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) struct dasd_ccw_req *refers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) INIT_LIST_HEAD(&requeue_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) /* Check status and move request to requeue_queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) if (cqr->status == DASD_CQR_IN_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) rc = device->discipline->term_IO(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) /* unable to terminate request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) "Unable to terminate request %p on suspend\n", cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) list_move_tail(&cqr->devlist, &requeue_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) wait_event(dasd_flush_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) (cqr->status != DASD_CQR_CLEAR_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) * requeueing requests to the block layer only works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) * for block device requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) if (_dasd_requeue_request(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) /* remove requests from device and block queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) list_del_init(&cqr->devlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) while (cqr->refers != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) refers = cqr->refers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) /* remove the request from the block queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) list_del(&cqr->blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) /* free the finished erp request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) dasd_free_erp_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) cqr = refers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) * _dasd_requeue_request already checked for a valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) * block device, no need to check again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) * all ERP requests (cqr->refers) have a cqr->block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) * pointer copied from the original cqr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) list_del_init(&cqr->blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) cqr->block->base->discipline->free_cp(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) cqr, (struct request *) cqr->callback_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * if requests remain, they are internal requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) * and go back to the device queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) if (!list_empty(&requeue_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) /* splice requeue_queue to the end of the ccw_queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) spin_lock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) list_splice_tail(&requeue_queue, &device->ccw_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) spin_unlock_irq(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) static void do_requeue_requests(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) struct dasd_device *device = container_of(work, struct dasd_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) requeue_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) dasd_generic_requeue_all_requests(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) if (device->block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) void dasd_schedule_requeue(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) /* queue call to do_requeue_requests to the kernel event daemon. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) if (!schedule_work(&device->requeue_requests))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) EXPORT_SYMBOL(dasd_schedule_requeue);
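
/*
 * Note on the reference counting above: the queued work item owns one
 * device reference, which do_requeue_requests() drops when it
 * finishes.  If schedule_work() returns false the work was already
 * pending and already owns a reference, so the one taken here is
 * released again immediately.
 */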
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) int dasd_generic_pm_freeze(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) struct dasd_device *device = dasd_device_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) return PTR_ERR(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) /* mark device as suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) set_bit(DASD_FLAG_SUSPENDED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) if (device->discipline->freeze)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) device->discipline->freeze(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) /* disallow new I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) return dasd_generic_requeue_all_requests(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) int dasd_generic_restore_device(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) struct dasd_device *device = dasd_device_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) return PTR_ERR(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) /* allow new I/O again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) dasd_device_remove_stop_bits(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) * call the discipline restore function;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) * if the device is stopped do nothing, e.g. for disconnected devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) if (device->discipline->restore && !(device->stopped))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) rc = device->discipline->restore(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) if (rc || device->stopped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) * if the resume failed for the DASD we put it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) * an UNRESUMED stop state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) device->stopped |= DASD_UNRESUMED_PM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) if (device->block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) if (device->block->request_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) blk_mq_run_hw_queues(device->block->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) int rdc_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) int magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) /* internal error 13 - allocating the RDC request failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) "An error occurred in the DASD device driver, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) "reason=%s\n", "13");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) ccw->cmd_code = CCW_CMD_RDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) ccw->cda = (__u32)(addr_t) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) ccw->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) ccw->count = rdc_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) cqr->expires = 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) void *rdc_buffer, int rdc_buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) ret = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
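
/*
 * Example call (a sketch modelled on the discipline code; the buffer
 * type, magic, and size are assumptions made for illustration):
 *
 *	struct dasd_eckd_characteristics rdc_data;
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &rdc_data, sizeof(rdc_data));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Reading device characteristics failed, rc=%d\n", rc);
 */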
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) * In command mode and transport mode we need to look for sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) * data in different places. The sense data itself is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) * an array of 32 bytes, so we can unify the sense data access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) * for both modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) char *dasd_get_sense(struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) struct tsb *tsb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) char *sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) if (irb->scsw.tm.tcw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) tsb = tcw_get_tsb((struct tcw *)(unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) irb->scsw.tm.tcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) if (tsb && tsb->length == 64 && tsb->flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) switch (tsb->flags & 0x07) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) case 1: /* tsa_iostat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) sense = tsb->tsa.iostat.sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) case 2: /* tsa_ddpc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) sense = tsb->tsa.ddpc.sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) /* currently we don't use interrogate data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) } else if (irb->esw.esw0.erw.cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) sense = irb->ecw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) return sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) EXPORT_SYMBOL_GPL(dasd_get_sense);
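
/*
 * Typical caller pattern (illustrative; SNS0_CMD_REJECT is assumed to
 * come from the discipline headers): interrupt handlers use this
 * helper instead of digging through the irb themselves, so command
 * mode and transport mode are handled uniformly:
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense && (sense[0] & SNS0_CMD_REJECT))
 *		handle_command_reject(device);	// hypothetical handler
 */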
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) void dasd_generic_shutdown(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) device = dasd_device_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (device->block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) wait_event(shutdown_waitq, _wait_for_empty_queues(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
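
/*
 * Wiring sketch (assumed, mirroring how the discipline drivers hook
 * the dasd_generic_* callbacks in this section into a ccw_driver;
 * only a field subset is shown):
 *
 *	static struct ccw_driver example_dasd_driver = {
 *		.path_event	= dasd_generic_path_event,
 *		.shutdown	= dasd_generic_shutdown,
 *	};
 */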
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) static int __init dasd_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) init_waitqueue_head(&dasd_init_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) init_waitqueue_head(&dasd_flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) init_waitqueue_head(&generic_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) init_waitqueue_head(&shutdown_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) /* register 'common' DASD debug area, used for all DBF_XXX calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) if (dasd_debug_area == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) debug_register_view(dasd_debug_area, &debug_sprintf_view);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) debug_set_level(dasd_debug_area, DBF_WARNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) DBF_EVENT(DBF_EMERG, "%s", "debug area created");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) dasd_diag_discipline_pointer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) dasd_statistics_createroot();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) rc = dasd_devmap_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) rc = dasd_gendisk_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) rc = dasd_parse();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) rc = dasd_eer_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) rc = dasd_proc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) pr_info("The DASD device driver could not be initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) dasd_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) module_init(dasd_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) module_exit(dasd_exit);
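
/*
 * Usage note (illustrative): when built as a module the driver core is
 * loaded as dasd_mod, and device ranges are handed over via the
 * "dasd=" parameter that dasd_parse() evaluates during dasd_init(),
 * e.g.:
 *
 *	modprobe dasd_mod dasd=0.0.1000-0.0.100f
 */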