// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif	/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * Raw track access is always mapped to 64k in memory,
 * so each track maps to 16 blocks of 4k.
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
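
/*
 * Worked numbers (illustrative only): 16 blocks * 4096 bytes = 65536 bytes
 * = 64 KiB per track image, and 65536 / 512 = 128 sectors of 512 bytes -
 * which is how the three constants above relate to each other.
 */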

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
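/*
 * Note: the reserve/release request above is allocated once at module
 * initialization rather than per call, so a reserve or release can still
 * be issued when memory is tight; the mutex serializes use of the single
 * static buffer.
 */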

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);

struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/* definitions for the path verification worker */
struct path_verification_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
};
static struct path_verification_work_data *path_verification_worker;
static DEFINE_MUTEX(dasd_path_verification_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);

/* Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe(struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
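/*
 * ceil_quot() above is plain round-up integer division, e.g.
 * ceil_quot(10, 4) == 3 and ceil_quot(4102, 232) == 18.
 */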

static unsigned int
recs_per_track(struct dasd_eckd_characteristics *rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
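/*
 * Worked example for recs_per_track() (illustrative only): a 3390 with
 * 4096-byte records and no key field (kl = 0, dl = 4096) gives
 *   dn = ceil_quot(4102, 232) + 1 = 19
 *   1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / (19 + 124) = 12
 * i.e. twelve 4 KiB blocks fit on one 3390 track.
 */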

static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}
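/*
 * Packing example for set_ch_t() (illustrative): set_ch_t(&geo, 0x12345, 6)
 * yields geo.cyl = 0x2345 and geo.head = 0x16 - the low 16 bits of the
 * cylinder go into cyl, the remaining high bits into the upper 12 bits of
 * head, and the real head number into its low 4 bits.  This is the extended
 * (EAV-style) CCHH encoding used throughout this driver.
 */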

/*
 * Calculate the failing track from sense data, depending on whether
 * it is an EAV device or not.
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}
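/*
 * Example for dasd_eckd_track_from_irb() (hypothetical sense bytes): with
 * sense[28..31] = 0x01, 0x02, 0x03, 0x45 and DASD_SENSE_BIT_3 set, the
 * cylinder is assembled as 0x03 << 20 | 0x40 << 12 | 0x01 << 8 | 0x02
 * = 0x340102 and the head is 0x45 & 0x0F = 5; the failing track is then
 * cyl * trk_per_cyl + head.
 */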

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
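/*
 * Minimal usage sketch for define_extent() (illustrative only, not taken
 * from a real request; ccw, dedata and basedev are hypothetical locals):
 * a read covering tracks 10..11 with 4 KiB blocks would set up its first
 * CCW roughly as
 *
 *	define_extent(ccw++, dedata, 10, 11, DASD_ECKD_CCW_READ_MT,
 *		      basedev, 4096);
 *
 * which fills the DE data, points the CCW at it and, for write commands,
 * also tries to add an XRC time stamp via set_timestamp().
 */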

static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
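	/*
	 * Sector calculation example (illustrative): on a 3390 with
	 * reclen = 4096 and rec_on_trk = 2, dn = 18, d = 133 and
	 * sector = (49 + 1 * (10 + 133)) / 8 = 24.
	 */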
	data->sector = sector;
	/*
	 * Note: the meaning of count depends on the operation. For
	 * record based I/O it is the number of records, but for
	 * track based I/O it is the number of tracks.
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
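/*
 * Note: prefix() is the format-0 variant - it only populates the define
 * extent part of the PFX data; the locate record extended part is filled
 * in by prefix_LRE() callers that pass format == 1.
 */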

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device *device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that need to be
 * read or written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *	ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}
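/*
 * Example (illustrative): with 12 blocks per track, recids 0..2 (the CDL
 * records on track 0) and 12..23 (all of track 1) are "special" and use
 * the KD command variants; every other recid returns 0.
 */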

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
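/*
 * For example, recid 1 on a CDL-formatted volume yields 148 bytes, while
 * any special record outside track 0 yields LABEL_SIZE (140).
 */
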
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit + 2 * count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
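/*
 * The resulting UID is what userspace sees as the device identifier,
 * roughly "<vendor>.<serial>.<ssid>.<unit address>[.<vduit>]", with the
 * 32-character vduit only present for virtual devices that supply a
 * virtual device SNEQ (e.g. z/VM minidisks).
 */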

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * compare device UID with data of a given dasd_eckd_private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * return 0 for match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static int dasd_eckd_compare_path_uid(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct dasd_eckd_private *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct dasd_uid device_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) create_uid(private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) dasd_eckd_get_uid(device, &device_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
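/*
* Set up the channel program and the common cqr fields for a
* Read Configuration Data (RCD) request on the given path mask.
*/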
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct dasd_ccw_req *cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) __u8 *rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) __u8 lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * buffer has to start with EBCDIC "V1.0" to show
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * support for virtual device SNEQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) rcd_buffer[0] = 0xE5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) rcd_buffer[1] = 0xF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) rcd_buffer[2] = 0x4B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) rcd_buffer[3] = 0xF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ccw->cmd_code = DASD_ECKD_CCW_RCD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ccw->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ccw->cda = (__u32)(addr_t)rcd_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ccw->count = DASD_ECKD_RCD_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) cqr->magic = DASD_ECKD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) cqr->expires = 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) cqr->lpm = lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * Wakeup helper for read_conf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * If the cqr is not done and needs some error recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * the buffer has to be re-initialized with the EBCDIC "V1.0"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * signature to show support for virtual device SNEQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) __u8 *rcd_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (cqr->status != DASD_CQR_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) rcd_buffer = (__u8 *)((addr_t) ccw->cda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) memset(rcd_buffer, 0, DASD_ECKD_RCD_DATA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) rcd_buffer[0] = 0xE5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rcd_buffer[1] = 0xF1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) rcd_buffer[2] = 0x4B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) rcd_buffer[3] = 0xF0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) dasd_wakeup_cb(cqr, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
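/*
* Read the configuration data on one path using the caller-provided
* cqr and buffer; ERP is disabled and the request is started
* immediately. Used during path verification.
*/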
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct dasd_ccw_req *cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) __u8 *rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) __u8 lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct ciw *ciw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * sanity check: scan for RCD command in extended SenseID data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * some devices do not support RCD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) cqr->retries = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) cqr->callback = read_conf_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
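/*
* Read the configuration data for one path (lpm) into a freshly
* allocated buffer. On success *rcd_buffer and *rcd_buffer_size are
* set and the caller owns the buffer; on error both are reset and an
* error code is returned.
*/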
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) void **rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) int *rcd_buffer_size, __u8 lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct ciw *ciw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) char *rcd_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * sanity check: scan for RCD command in extended SenseID data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * some devices do not support RCD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!rcd_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 0, /* use rcd_buf as data area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) "Could not allocate RCD request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) cqr->callback = read_conf_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ret = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * update the caller's output parameters only on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) *rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) *rcd_buffer = rcd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) kfree(rcd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *rcd_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) *rcd_buffer_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
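/*
* Locate the NED, SNEQ, vdSNEQ and GNEQ records within the
* configuration data and store pointers to them in the private
* structure. Returns -EINVAL if the mandatory NED or GNEQ is missing.
*/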
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct dasd_sneq *sneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) int i, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) private->ned = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) private->sneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) private->vdsneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) private->gneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) count = private->conf_len / sizeof(struct dasd_sneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) sneq = (struct dasd_sneq *)private->conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) for (i = 0; i < count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (sneq->flags.identifier == 1 && sneq->format == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) private->sneq = sneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) else if (sneq->flags.identifier == 1 && sneq->format == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) private->vdsneq = (struct vd_sneq *)sneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) else if (sneq->flags.identifier == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) private->gneq = (struct dasd_gneq *)sneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) private->ned = (struct dasd_ned *)sneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) sneq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (!private->ned || !private->gneq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) private->ned = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) private->sneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) private->vdsneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) private->gneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
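/*
* Return the path access byte (byte 18, low three bits) of the general
* NEQ contained in the given configuration data, or 0 if no general
* NEQ is found.
*/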
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) struct dasd_gneq *gneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) int i, count, found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) count = conf_len / sizeof(*gneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) gneq = (struct dasd_gneq *)conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) for (i = 0; i < count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (gneq->flags.identifier == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) gneq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return ((char *)gneq)[18] & 0x07;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
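/*
* Free all previously stored per-path configuration data and reset the
* associated path information of the device.
*/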
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static void dasd_eckd_clear_conf_data(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) private->conf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) private->conf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) kfree(device->path[i].conf_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) device->path[i].conf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) device->path[i].cssid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) device->path[i].ssid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) device->path[i].chpid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
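/*
* Read the configuration data on all operational paths, store it per
* path, build the device UID from the first valid data set and verify
* that every other path leads to the same device. Paths that report a
* different UID are recorded in the cable path mask and not used.
*/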
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static int dasd_eckd_read_conf(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) void *conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int conf_len, conf_data_saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int rc, path_err, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) __u8 lpm, opm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct dasd_eckd_private *private, path_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct dasd_uid *uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) char print_path_uid[60], print_device_uid[60];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct channel_path_desc_fmt0 *chp_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct subchannel_id sch_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) opm = ccw_device_get_path_mask(device->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ccw_device_get_schid(device->cdev, &sch_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) conf_data_saved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) path_err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /* get configuration data per operational path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) for (lpm = 0x80; lpm; lpm >>= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (!(lpm & opm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) rc = dasd_eckd_read_conf_lpm(device, &conf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) &conf_len, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) "Read configuration data returned "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) "error %d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (conf_data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) "No configuration data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) "retrieved");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* no further analysis possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dasd_path_add_opm(device, opm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) continue; /* no error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* save first valid configuration data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (!conf_data_saved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* initially clear previously stored conf_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) dasd_eckd_clear_conf_data(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) private->conf_data = conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) private->conf_len = conf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (dasd_eckd_identify_conf_parts(private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) private->conf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) private->conf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) kfree(conf_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) pos = pathmask_to_pos(lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /* store per path conf_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) device->path[pos].conf_data = conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) device->path[pos].cssid = sch_id.cssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) device->path[pos].ssid = sch_id.ssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (chp_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) device->path[pos].chpid = chp_desc->chpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) kfree(chp_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * build the device UID so that the data of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * other paths can be compared to it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dasd_eckd_generate_uid(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) conf_data_saved++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) path_private.conf_data = conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (dasd_eckd_identify_conf_parts(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) &path_private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) path_private.conf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) path_private.conf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) kfree(conf_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (dasd_eckd_compare_path_uid(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) device, &path_private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) uid = &path_private.uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (strlen(uid->vduit) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) snprintf(print_path_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) sizeof(print_path_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) "%s.%s.%04x.%02x.%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) uid->vendor, uid->serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) uid->ssid, uid->real_unit_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) uid->vduit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) snprintf(print_path_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) sizeof(print_path_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) "%s.%s.%04x.%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) uid->vendor, uid->serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) uid->ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) uid->real_unit_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) uid = &private->uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (strlen(uid->vduit) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) snprintf(print_device_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) sizeof(print_device_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) "%s.%s.%04x.%02x.%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) uid->vendor, uid->serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) uid->ssid, uid->real_unit_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) uid->vduit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) snprintf(print_device_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) sizeof(print_device_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) "%s.%s.%04x.%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) uid->vendor, uid->serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) uid->ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) uid->real_unit_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) "Not all channel paths lead to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) "the same device, path %02X leads to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) "device %s instead of %s\n", lpm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) print_path_uid, print_device_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) path_err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) dasd_path_add_cablepm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) pos = pathmask_to_pos(lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* store per path conf_data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) device->path[pos].conf_data = conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) device->path[pos].cssid = sch_id.cssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) device->path[pos].ssid = sch_id.ssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (chp_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) device->path[pos].chpid = chp_desc->chpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) kfree(chp_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) path_private.conf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) path_private.conf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) switch (dasd_eckd_path_access(conf_data, conf_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) case 0x02:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dasd_path_add_nppm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) case 0x03:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dasd_path_add_ppm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!dasd_path_get_opm(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) dasd_path_set_opm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dasd_generic_path_operational(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) dasd_path_add_opm(device, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return path_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
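/*
* Determine the maximum data size usable for transport mode (zHPF)
* requests. Returns 0 if transport mode is not supported, disabled via
* dasd_nofcx, or if the maximum data count cannot be determined;
* otherwise the data count scaled by FCX_MAX_DATA_FACTOR.
*/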
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static u32 get_fcx_max_data(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int fcx_in_css, fcx_in_gneq, fcx_in_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) unsigned int mdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int tpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (dasd_nofcx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* is transport mode supported? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) fcx_in_css = css_general_characteristics.fcx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) fcx_in_features = private->features.feature[40] & 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!tpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) mdc = ccw_device_get_mdc(device->cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (mdc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return (u32)mdc * FCX_MAX_DATA_FACTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
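/*
* Check that a new path supports at least the currently active maximum
* zHPF data size; return -EACCES if the path's maximum is smaller.
*/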
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) unsigned int mdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) u32 fcx_max_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (private->fcx_max_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) mdc = ccw_device_get_mdc(device->cdev, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (mdc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) "Detecting the maximum data size for zHPF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) "requests failed (rc=%d) for a new path %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) mdc, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return mdc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (fcx_max_data < private->fcx_max_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) "The maximum data size for zHPF requests %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) "on a new path %x is below the active maximum "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) "%u\n", fcx_max_data, lpm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) private->fcx_max_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
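/*
* Re-read the configuration data over the first operational path that
* delivers valid data and regenerate the device UID from it.
*/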
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static int rebuild_device_uid(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) struct path_verification_work_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) __u8 lpm, opm = dasd_path_get_opm(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) int rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) for (lpm = 0x80; lpm; lpm >>= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (!(lpm & opm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) memset(&data->cqr, 0, sizeof(data->cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) data->cqr.cpaddr = &data->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) data->rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) "Read configuration data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) "returned error %d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) memcpy(private->conf_data, data->rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) DASD_ECKD_RCD_DATA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (dasd_eckd_identify_conf_parts(private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) } else /* first valid path is enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) rc = dasd_eckd_generate_uid(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
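/*
* Worker function for path verification: read the configuration data
* on each path to be verified, classify the path as operational,
* preferred, non-preferred, to be retried, miscabled or unusable for
* zHPF, and update the path masks of the device accordingly.
*/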
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void do_path_verification_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct path_verification_work_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct dasd_eckd_private path_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct dasd_uid *uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) char print_uid[60];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) data = container_of(work, struct path_verification_work_data, worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) device = data->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /* delay path verification until the device has been resumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) schedule_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /* check if path verification is already running and delay if so */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) schedule_work(work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) opm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) npm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ppm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) epm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) hpfpm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) cablepm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) for (lpm = 0x80; lpm; lpm >>= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (!(lpm & data->tbvpm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) memset(&data->cqr, 0, sizeof(data->cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) data->cqr.cpaddr = &data->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) data->rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) switch (dasd_eckd_path_access(data->rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) DASD_ECKD_RCD_DATA_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) case 0x02:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) npm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) case 0x03:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ppm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) opm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) } else if (rc == -EOPNOTSUPP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) "path verification: No configuration "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) "data retrieved");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) opm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) } else if (rc == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) "path verification: device is stopped,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) " try again later");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) epm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) "Reading device feature codes failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) "(rc=%d) for new path %x\n", rc, lpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (verify_fcx_max_data(device, lpm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) opm &= ~lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) npm &= ~lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) ppm &= ~lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) hpfpm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * save conf_data for the comparison below, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * rebuild_device_uid may change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * the original data in the meantime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) memcpy(&path_rcd_buf, data->rcd_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) DASD_ECKD_RCD_DATA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) path_private.conf_data = (void *) &path_rcd_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (dasd_eckd_identify_conf_parts(&path_private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) path_private.conf_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) path_private.conf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * compare the path UID with the device UID only if at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * one valid path is left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * otherwise the device UID may have changed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * the first working path UID will be used as device UID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (dasd_path_get_opm(device) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) dasd_eckd_compare_path_uid(device, &path_private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * the comparison was not successful;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * rebuild the device UID with at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * known path in case a z/VM hyperswap command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * has changed the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * after this, compare again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * if either the rebuild or the recompare fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * the path cannot be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (rebuild_device_uid(device, data) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) dasd_eckd_compare_path_uid(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) device, &path_private)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) uid = &path_private.uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (strlen(uid->vduit) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) snprintf(print_uid, sizeof(print_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) "%s.%s.%04x.%02x.%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) uid->vendor, uid->serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) uid->ssid, uid->real_unit_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) uid->vduit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) snprintf(print_uid, sizeof(print_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) "%s.%s.%04x.%02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) uid->vendor, uid->serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) uid->ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) uid->real_unit_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) "The newly added channel path %02X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) "will not be used because it leads "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) "to a different device %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) lpm, print_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) opm &= ~lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) npm &= ~lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ppm &= ~lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) cablepm |= lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * There is a small chance that a path is lost again between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * above path verification and the following modification of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * the device opm mask. We could avoid that race here by using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * yet another path mask, but we rather deal with this unlikely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * situation in dasd_start_IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (!dasd_path_get_opm(device) && opm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) dasd_path_set_opm(device, opm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dasd_generic_path_operational(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) dasd_path_add_opm(device, opm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dasd_path_add_nppm(device, npm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) dasd_path_add_ppm(device, ppm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) dasd_path_add_tbvpm(device, epm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) dasd_path_add_cablepm(device, cablepm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dasd_path_add_nohpfpm(device, hpfpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (data->isglobal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) mutex_unlock(&dasd_path_verification_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
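/*
* Schedule path verification for the paths given in lpm. Falls back to
* the global, mutex-protected work structure if no memory is available
* for a dedicated one.
*/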
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) struct path_verification_work_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (!data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (mutex_trylock(&dasd_path_verification_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) data = path_verification_worker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) data->isglobal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) memset(data, 0, sizeof(*data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) data->isglobal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) INIT_WORK(&data->worker, do_path_verification_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) data->device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) data->tbvpm = lpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) schedule_work(&data->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
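/*
* Mark the given paths (or, if pm is zero, all currently not
* operational paths) to be verified and schedule the device bh;
* also initializes fcx_max_data if it is not yet set.
*/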
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (!private->fcx_max_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) private->fcx_max_data = get_fcx_max_data(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
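/*
* Read the feature codes of the storage server via a PSF/RSSD request
* (suborder 0x41) and store them in the device private data.
*/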
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static int dasd_eckd_read_features(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct dasd_psf_prssd_data *prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct dasd_rssd_features *features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) memset(&private->features, 0, sizeof(struct dasd_rssd_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) (sizeof(struct dasd_psf_prssd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sizeof(struct dasd_rssd_features)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) "allocate initialization request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) cqr->expires = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* Prepare for Read Subsystem Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) prssdp->order = PSF_ORDER_PRSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) prssdp->suborder = 0x41; /* Read Feature Codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /* all other bytes of prssdp must be zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) ccw->count = sizeof(struct dasd_psf_prssd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) ccw->cda = (__u32)(addr_t) prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /* Read Subsystem Data - feature codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) features = (struct dasd_rssd_features *) (prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) memset(features, 0, sizeof(struct dasd_rssd_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) ccw->count = sizeof(struct dasd_rssd_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) ccw->cda = (__u32)(addr_t) features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) rc = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) features = (struct dasd_rssd_features *) (prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) memcpy(&private->features, features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) sizeof(struct dasd_rssd_features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) dev_warn(&device->cdev->dev, "Reading device feature codes"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) " failed with rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /* Read Volume Information - Volume Storage Query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static int dasd_eckd_read_vol_info(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct dasd_psf_prssd_data *prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct dasd_rssd_vsq *vsq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) int useglobal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /* This command cannot be executed on an alias device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (private->uid.type == UA_BASE_PAV_ALIAS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) private->uid.type == UA_HYPER_PAV_ALIAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) useglobal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) sizeof(*prssdp) + sizeof(*vsq), device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) "Could not allocate initialization request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) mutex_lock(&dasd_vol_info_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) useglobal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) cqr = &dasd_vol_info_req->cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) cqr->cpaddr = &dasd_vol_info_req->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) cqr->data = &dasd_vol_info_req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) cqr->magic = DASD_ECKD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* Prepare for Read Subsystem Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) prssdp = cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) prssdp->order = PSF_ORDER_PRSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) prssdp->lss = private->ned->ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) prssdp->volume = private->ned->unit_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) ccw->count = sizeof(*prssdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) ccw->cda = (__u32)(addr_t)prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* Read Subsystem Data - Volume Storage Query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) memset(vsq, 0, sizeof(*vsq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ccw->count = sizeof(*vsq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ccw->cda = (__u32)(addr_t)vsq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) cqr->expires = device->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /* The command might not be supported. Suppress the error output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) rc = dasd_sleep_on_interruptible(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) memcpy(&private->vsq, vsq, sizeof(*vsq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) "Reading the volume storage information failed with rc=%d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (useglobal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) mutex_unlock(&dasd_vol_info_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static int dasd_eckd_is_ese(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) return private->vsq.vol_info.ese;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static int dasd_eckd_ext_pool_id(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return private->vsq.extent_pool_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * This value represents the total amount of available space. As more space is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * allocated by ESE volumes, this value will decrease.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * The data for this value is therefore updated on every call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static int dasd_eckd_space_configured(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) rc = dasd_eckd_read_vol_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) return rc ? : private->vsq.space_configured;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) * The value of space allocated by an ESE volume may have changed and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * therefore updated on every call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static int dasd_eckd_space_allocated(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) rc = dasd_eckd_read_vol_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return rc ? : private->vsq.space_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static int dasd_eckd_logical_capacity(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return private->vsq.logical_capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
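/*
 * Worker that re-reads the configured space of the (base) device after an
 * out-of-space condition. If space is available again, I/O is resumed via
 * dasd_generic_space_avail(); otherwise the extent pool is reported as full.
 */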
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct ext_pool_exhaust_work_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct dasd_device *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) data = container_of(work, struct ext_pool_exhaust_work_data, worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) device = data->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) base = data->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (!base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) base = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (dasd_eckd_space_configured(base) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) dasd_generic_space_avail(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
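/*
 * Schedule the extent pool exhaustion handler for a failed request. The
 * allocation uses GFP_ATOMIC since this may run in atomic context; the
 * device reference taken here is released by the worker.
 */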
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct ext_pool_exhaust_work_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) data = kzalloc(sizeof(*data), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) data->device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (cqr->block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) data->base = cqr->block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) else if (cqr->basedev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) data->base = cqr->basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) data->base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) schedule_work(&data->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
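/*
 * Copy the extent pool summary that matches the extent pool of this device
 * from the Logical Configuration Query data.
 */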
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct dasd_rssd_lcq *lcq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int pool_id = dasd_eckd_ext_pool_id(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) struct dasd_ext_pool_sum eps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) for (i = 0; i < lcq->pool_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) eps = lcq->ext_pool_sum[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (eps.pool_id == pool_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) memcpy(&private->eps, &eps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) sizeof(struct dasd_ext_pool_sum));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* Read Extent Pool Information - Logical Configuration Query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) struct dasd_psf_prssd_data *prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) struct dasd_rssd_lcq *lcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /* This command cannot be executed on an alias device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (private->uid.type == UA_BASE_PAV_ALIAS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) private->uid.type == UA_HYPER_PAV_ALIAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) sizeof(*prssdp) + sizeof(*lcq), device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) "Could not allocate initialization request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /* Prepare for Read Subsystem Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) prssdp = cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) memset(prssdp, 0, sizeof(*prssdp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) prssdp->order = PSF_ORDER_PRSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) ccw->count = sizeof(*prssdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) ccw->cda = (__u32)(addr_t)prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) memset(lcq, 0, sizeof(*lcq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ccw->count = sizeof(*lcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ccw->cda = (__u32)(addr_t)lcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) cqr->expires = device->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /* The command might not be supported. Suppress the error output */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) rc = dasd_sleep_on_interruptible(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) dasd_eckd_cpy_ext_pool_data(device, lcq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) "Reading the logical configuration failed with rc=%d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * Depending on the device type, the extent size is specified either as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * cylinders per extent (CKD) or size per extent (FBA).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * A 1 GB extent corresponds to 1113 cylinders, a 16 MB extent to 21 cylinders.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) */
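/*
 * For reference: with classic 3390 geometry (15 tracks per cylinder and
 * roughly 56 KiB per track), 1113 cylinders come to roughly 1 GB and
 * 21 cylinders to roughly 16 MB.
 */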
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static int dasd_eckd_ext_size(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct dasd_ext_pool_sum eps = private->eps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (!eps.flags.extent_size_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (eps.extent_size.size_1G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) return 1113;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (eps.extent_size.size_16M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) return 21;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return private->eps.warn_thrshld;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) return private->eps.flags.capacity_at_warnlevel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) * Extent Pool out of space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return private->eps.flags.pool_oos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * Build CP for Perform Subsystem Function - SSC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) int enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct dasd_psf_ssc_data *psf_ssc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) sizeof(struct dasd_psf_ssc_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) "Could not allocate PSF-SSC request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) psf_ssc_data->order = PSF_ORDER_SSC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) psf_ssc_data->suborder = 0xc0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (enable_pav) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) psf_ssc_data->suborder |= 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) psf_ssc_data->reserved[0] = 0x88;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) ccw->cda = (__u32)(addr_t)psf_ssc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) ccw->count = 66;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) cqr->expires = 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * Perform Subsystem Function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * It is necessary to trigger CIO for channel revalidation since this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * call might change the behaviour of DASD devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * Set flags, e.g. turn on failfast, to prevent blocking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * The calling function should handle failed requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) cqr->flags |= flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) rc = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* trigger CIO to reprobe devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) css_schedule_reprobe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) else if (cqr->intrc == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * Validate the storage server of the current device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static int dasd_eckd_validate_server(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) int enable_pav, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (private->uid.type == UA_BASE_PAV_ALIAS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) private->uid.type == UA_HYPER_PAV_ALIAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (dasd_nopav || MACHINE_IS_VM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) enable_pav = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) enable_pav = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /* The requested feature may not be available on the server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) * therefore just report the error and continue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) "returned rc=%d", private->uid.ssid, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * worker to perform a validate server in case of a lost path group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static void dasd_eckd_do_validate_server(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct dasd_device *device = container_of(work, struct dasd_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) kick_validate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (dasd_eckd_validate_server(device, flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /* schedule worker again if failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) schedule_work(&device->kick_validate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) static void dasd_eckd_kick_validate_server(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* exit if device not online or in offline processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) device->state < DASD_STATE_ONLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /* queue call to do_validate_server to the kernel event daemon. */
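/* If the work was already queued, drop the extra device reference again. */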
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) if (!schedule_work(&device->kick_validate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * Check device characteristics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * If the device is accessible using the ECKD discipline, the device is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) dasd_eckd_check_characteristics(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) struct dasd_uid temp_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) int readonly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) unsigned long value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /* setup work queue for validate server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /* setup work queue for summary unit check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) if (!ccw_device_is_pathgroup(device->cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) "A channel path group could not be established\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (!ccw_device_is_multipath(device->cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) dev_info(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) "The DASD is not operating in multipath mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (!private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (!private) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) "Allocating memory for private DASD data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) "failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) device->private = private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) memset(private, 0, sizeof(*private));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) /* Invalidate status of initial analysis. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) private->init_cqr_status = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /* Set default cache operations. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) private->attrib.operation = DASD_NORMAL_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) private->attrib.nr_cyl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) /* Read Configuration Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) rc = dasd_eckd_read_conf(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) goto out_err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /* set some default values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) device->default_expires = DASD_EXPIRES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) device->default_retries = DASD_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) device->path_thrhld = DASD_ECKD_PATH_THRHLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) device->path_interval = DASD_ECKD_PATH_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (private->gneq) {
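/*
 * The default expiration time is encoded in the gneq configuration data as
 * a mantissa/exponent pair: number * 10^value seconds (e.g. number = 3,
 * value = 1 gives 30 seconds).
 */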
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) value = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) for (i = 0; i < private->gneq->timeout.value; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) value = 10 * value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) value = value * private->gneq->timeout.number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /* do not accept useless values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (value != 0 && value <= DASD_EXPIRES_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) device->default_expires = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) dasd_eckd_get_uid(device, &temp_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (temp_uid.type == UA_BASE_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) block = dasd_alloc_block();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (IS_ERR(block)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) "could not allocate dasd "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) "block structure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) rc = PTR_ERR(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) goto out_err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) device->block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) block->base = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /* register lcu with alias handling, enable PAV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) rc = dasd_alias_make_device_known_to_lcu(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) goto out_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
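/*
 * Perform the PSF-SSC setup (enable PAV where possible); a failure here is
 * only reported, not treated as fatal (see dasd_eckd_validate_server()).
 */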
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) dasd_eckd_validate_server(device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) /* device may report different configuration data after LCU setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) rc = dasd_eckd_read_conf(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) goto out_err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) /* Read Feature Codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) dasd_eckd_read_features(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) /* Read Volume Information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) dasd_eckd_read_vol_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* Read Extent Pool Information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) dasd_eckd_read_ext_pool_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) /* Read Device Characteristics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) &private->rdc_data, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) "Read device characteristic failed, rc=%d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) goto out_err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if ((device->features & DASD_FEATURE_USERAW) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) !(private->rdc_data.facilities.RT_in_LR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) dev_err(&device->cdev->dev, "The storage server does not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) "support raw-track access\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) goto out_err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /* find the valid cylinder size */
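/*
 * Large volumes report LV_COMPAT_CYL in the classic cylinder field and
 * provide the real cylinder count in long_no_cyl.
 */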
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) private->rdc_data.long_no_cyl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) private->real_cyl = private->rdc_data.long_no_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) private->real_cyl = private->rdc_data.no_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) private->fcx_max_data = get_fcx_max_data(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) readonly = dasd_device_is_ro(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (readonly)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) "with %d cylinders, %d heads, %d sectors%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) private->rdc_data.dev_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) private->rdc_data.dev_model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) private->rdc_data.cu_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) private->rdc_data.cu_model.model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) private->real_cyl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) private->rdc_data.trk_per_cyl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) private->rdc_data.sec_per_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) readonly ? ", read-only device" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) out_err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) dasd_alias_disconnect_device_from_lcu(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) out_err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) dasd_free_block(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) device->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) out_err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) dasd_eckd_clear_conf_data(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) kfree(device->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) device->private = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static void dasd_eckd_uncheck_device(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (!private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) dasd_alias_disconnect_device_from_lcu(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) private->ned = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) private->sneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) private->vdsneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) private->gneq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) dasd_eckd_clear_conf_data(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) dasd_eckd_analysis_ccw(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) struct eckd_count *count_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct LO_eckd_data *LO_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) int cplength, datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
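/*
 * The channel program consists of one Define Extent CCW, a Locate Record
 * plus four Read Count CCWs for track 0, and a Locate Record plus one
 * Read Count CCW for track 1, eight CCWs in total.
 */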
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) cplength = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /* Define extent for the first 2 tracks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) define_extent(ccw++, cqr->data, 0, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) DASD_ECKD_CCW_READ_COUNT, device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) LO_data = cqr->data + sizeof(struct DE_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) /* Locate record for the first 4 records on track 0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) locate_record(ccw++, LO_data++, 0, 0, 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) DASD_ECKD_CCW_READ_COUNT, device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) count_data = private->count_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) ccw->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ccw->count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ccw->cda = (__u32)(addr_t) count_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) count_data++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) /* Locate record for the first record on track 1. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) locate_record(ccw++, LO_data++, 1, 0, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) DASD_ECKD_CCW_READ_COUNT, device, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) /* Read count ccw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ccw->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) ccw->count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) ccw->cda = (__u32)(addr_t) count_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) cqr->retries = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) /* Set flags to suppress output for expected errors */
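/*
 * An unformatted track answers the Read Count with "no record found";
 * that case is evaluated later in dasd_eckd_analysis_evaluation().
 */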
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /* differentiate between 'no record found' and any other error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) char *sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) if (init_cqr->status == DASD_CQR_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) return INIT_CQR_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) else if (init_cqr->status == DASD_CQR_NEED_ERP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) init_cqr->status == DASD_CQR_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) sense = dasd_get_sense(&init_cqr->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (sense && (sense[1] & SNS1_NO_REC_FOUND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) return INIT_CQR_UNFORMATTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) return INIT_CQR_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) return INIT_CQR_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * This is the callback function for the init_analysis cqr. It saves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) * the status of the initial analysis ccw before it frees it and kicks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * the device to continue the startup sequence. This will call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * dasd_eckd_do_analysis again (if the device has not been marked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * for deletion in the meantime).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct dasd_device *device = init_cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) dasd_sfree_request(init_cqr, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) dasd_kick_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static int dasd_eckd_start_analysis(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) struct dasd_ccw_req *init_cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) init_cqr = dasd_eckd_analysis_ccw(block->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) if (IS_ERR(init_cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) return PTR_ERR(init_cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) init_cqr->callback = dasd_eckd_analysis_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) init_cqr->callback_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) init_cqr->expires = 5*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /* first try without ERP, so we can later handle unformatted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * devices as a special case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) init_cqr->retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) dasd_add_request_head(init_cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) static int dasd_eckd_end_analysis(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) struct dasd_device *device = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct eckd_count *count_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) unsigned int sb, blk_per_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) int status, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) struct dasd_ccw_req *init_cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) status = private->init_cqr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) private->init_cqr_status = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) if (status == INIT_CQR_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* try again, this time with full ERP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) init_cqr = dasd_eckd_analysis_ccw(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) dasd_sleep_on(init_cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) status = dasd_eckd_analysis_evaluation(init_cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) dasd_sfree_request(init_cqr, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (device->features & DASD_FEATURE_USERAW) {
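/*
 * Raw-track access uses fixed 4 KiB blocks, 16 per track; a 4 KiB block
 * spans 2^3 sectors of 512 bytes, hence s2b_shift = 3.
 */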
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) block->bp_block = DASD_RAW_BLOCKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) block->s2b_shift = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) goto raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (status == INIT_CQR_UNFORMATTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) return -EMEDIUMTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) } else if (status == INIT_CQR_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) "Detecting the DASD disk layout failed because "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) "of an I/O error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) private->uses_cdl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) /* Check Track 0 for Compatible Disk Layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) count_area = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (private->count_area[i].kl != 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) private->count_area[i].cyl != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) private->count_area[i].head != count_area_head[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) private->count_area[i].record != count_area_rec[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) private->uses_cdl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (i == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) count_area = &private->count_area[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
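/*
 * Not CDL: check whether the first five records describe a plain Linux
 * disk layout, i.e. records without key and with identical data lengths.
 */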
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (private->uses_cdl == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if ((private->count_area[i].kl != 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) (private->count_area[i].dl !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) private->count_area[0].dl) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) private->count_area[i].cyl != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) private->count_area[i].head != count_area_head[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) private->count_area[i].record != count_area_rec[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (i == 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) count_area = &private->count_area[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (private->count_area[3].record == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) "Track 0 has no records following the VTOC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (count_area != NULL && count_area->kl == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) /* we found nothing violating our disk layout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (dasd_check_blocksize(count_area->dl) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) block->bp_block = count_area->dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (block->bp_block == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) "The disk layout of the DASD is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) return -EMEDIUMTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) block->s2b_shift = 0; /* bits to shift 512 to get a block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) for (sb = 512; sb < block->bp_block; sb = sb << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) block->s2b_shift++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) raw:
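/*
 * Total number of blocks on the volume: cylinders * tracks per cylinder *
 * blocks per track.
 */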
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) block->blocks = ((unsigned long) private->real_cyl *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) private->rdc_data.trk_per_cyl *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) dev_info(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) "DASD with %u KB/block, %lu KB total size, %u KB/track, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) "%s\n", (block->bp_block >> 10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) (((unsigned long) private->real_cyl *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) private->rdc_data.trk_per_cyl *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) blk_per_trk * (block->bp_block >> 9)) >> 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) ((blk_per_trk * block->bp_block) >> 10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) private->uses_cdl ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) "compatible disk layout" : "linux disk layout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) static int dasd_eckd_do_analysis(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) struct dasd_eckd_private *private = block->base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (private->init_cqr_status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) return dasd_eckd_start_analysis(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) return dasd_eckd_end_analysis(block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) static int dasd_eckd_basic_to_ready(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) return dasd_alias_add_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) static int dasd_eckd_online_to_ready(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (cancel_work_sync(&device->reload_device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (cancel_work_sync(&device->kick_validate))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) static int dasd_eckd_basic_to_known(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) return dasd_alias_remove_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) struct dasd_eckd_private *private = block->base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (dasd_check_blocksize(block->bp_block) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) geo->sectors = recs_per_track(&private->rdc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 0, block->bp_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) geo->cylinders = private->rdc_data.no_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) geo->heads = private->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) * Build the TCW request for the format check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) int enable_pav, struct eckd_count *fmt_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) int rpt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) struct dasd_eckd_private *start_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) struct dasd_device *startdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) struct tidaw *last_tidaw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) struct itcw *itcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) int itcw_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) startdev = dasd_alias_get_start_dev(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) if (!startdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) startdev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) start_priv = startdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
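/* i.e. one count area (struct eckd_count) per record to be checked */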
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * we're adding 'count' tidaws to the itcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * calculate the corresponding itcw_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) itcw_size = itcw_calc_size(0, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) start_priv->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (IS_ERR(itcw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) cqr->cpaddr = itcw_get_tcw(itcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) sizeof(struct eckd_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) count * sizeof(struct eckd_count), 0, rpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) sizeof(struct eckd_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (IS_ERR(last_tidaw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) last_tidaw->flags |= TIDAW_FLAGS_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) itcw_finalize(itcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) cqr->cpmode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) cqr->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) cqr->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) cqr->basedev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) cqr->retries = startdev->default_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) cqr->expires = startdev->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) /* Set flags to suppress output for expected errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
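/* (FP: 'File Protected', IL: 'Incorrect Length') */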
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) * Build the CCW request for the format check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) int enable_pav, struct eckd_count *fmt_buffer, int rpt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) struct dasd_eckd_private *start_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) struct dasd_eckd_private *base_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) struct dasd_device *startdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) int cplength, datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) int use_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) startdev = dasd_alias_get_start_dev(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (!startdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) startdev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) start_priv = startdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) base_priv = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) use_prefix = base_priv->features.feature[8] & 0x01;
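/*
 * bit 0 of features byte 8 indicates that the device supports the PFX
 * (Prefix) command, which replaces the separate Define Extent and
 * Locate Record CCWs used in the else branch below
 */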
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) cplength = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) datasize = sizeof(struct PFX_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) cplength = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) datasize = sizeof(struct DE_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) sizeof(struct LO_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) cplength += count;
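/* plus one READ COUNT CCW per count area to be read back */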
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) start_priv->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) data = cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) count, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) DASD_ECKD_CCW_READ_COUNT, startdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) data += sizeof(struct DE_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) locate_record(ccw++, data, fdata->start_unit, 0, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) DASD_ECKD_CCW_READ_COUNT, base, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) ccw->count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) ccw->cda = (__u32)(addr_t) fmt_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) fmt_buffer++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) cqr->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) cqr->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) cqr->basedev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) cqr->retries = DASD_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) cqr->expires = startdev->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) /* Set flags to suppress output for expected errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) struct format_data_t *fdata, int enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) struct dasd_eckd_private *base_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) struct dasd_eckd_private *start_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) struct dasd_ccw_req *fcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) struct eckd_count *ect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) struct ch_t address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) int rpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) int cplength, datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) int intensity = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) int r0_perm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) int nr_tracks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) int use_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) startdev = dasd_alias_get_start_dev(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if (!startdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) startdev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) start_priv = startdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) base_priv = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) * fdata->intensity is a bit string that tells us what to do:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) * Bit 0: write record zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) * Bit 1: write home address, currently not supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) * Bit 2: invalidate tracks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * Bit 3: use OS/390 compatible disk layout (cdl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * Bit 4: do not allow storage subsystem to modify record zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) * Only some bit combinations make sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) */
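/*
 * e.g. 0x09 (bits 0 and 3) writes record zero and formats the track
 * using the compatible disk layout, while 0x0c (bits 2 and 3)
 * invalidates the track; see the switch statement below
 */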
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (fdata->intensity & 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) r0_perm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) intensity = fdata->intensity & ~0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) r0_perm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) intensity = fdata->intensity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) use_prefix = base_priv->features.feature[8] & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) switch (intensity) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) case 0x00: /* Normal format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) case 0x08: /* Normal format, use cdl. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) cplength = 2 + (rpt*nr_tracks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) if (use_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) datasize = sizeof(struct PFX_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) rpt * nr_tracks * sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) datasize = sizeof(struct DE_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) rpt * nr_tracks * sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) case 0x01: /* Write record zero and format track. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) case 0x09: /* Write record zero and format track, use cdl. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) cplength = 2 + rpt * nr_tracks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if (use_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) datasize = sizeof(struct PFX_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) sizeof(struct eckd_count) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) rpt * nr_tracks * sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) datasize = sizeof(struct DE_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) sizeof(struct eckd_count) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) rpt * nr_tracks * sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) case 0x04: /* Invalidate track. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) case 0x0c: /* Invalidate track, use cdl. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) cplength = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (use_prefix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) datasize = sizeof(struct PFX_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) datasize = sizeof(struct DE_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) dev_warn(&startdev->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) "An I/O control call used incorrect flags 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) fdata->intensity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
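/*
 * the request data area holds the PFX (or Define Extent) and Locate
 * Record parameters followed by the count areas (struct eckd_count)
 * describing the records to be written
 */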
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (IS_ERR(fcp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) return fcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) start_priv->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) data = fcp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) ccw = fcp->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) switch (intensity & ~0x08) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) case 0x00: /* Normal format. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) prefix(ccw++, (struct PFX_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) DASD_ECKD_CCW_WRITE_CKD, base, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) /* grant subsystem permission to format R0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) if (r0_perm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) ((struct PFX_eckd_data *)data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ->define_extent.ga_extended |= 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) data += sizeof(struct PFX_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) define_extent(ccw++, (struct DE_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) /* grant subsystem permission to format R0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) if (r0_perm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) ((struct DE_eckd_data *) data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) ->ga_extended |= 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) data += sizeof(struct DE_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) locate_record(ccw++, (struct LO_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) fdata->start_unit, 0, rpt*nr_tracks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) DASD_ECKD_CCW_WRITE_CKD, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) fdata->blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) data += sizeof(struct LO_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) case 0x01: /* Write record zero + format track. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) prefix(ccw++, (struct PFX_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) DASD_ECKD_CCW_WRITE_RECORD_ZERO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) base, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) data += sizeof(struct PFX_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) define_extent(ccw++, (struct DE_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) data += sizeof(struct DE_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) locate_record(ccw++, (struct LO_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) fdata->start_unit, 0, rpt * nr_tracks + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) base->block->bp_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) data += sizeof(struct LO_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) case 0x04: /* Invalidate track. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) prefix(ccw++, (struct PFX_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) DASD_ECKD_CCW_WRITE_CKD, base, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) data += sizeof(struct PFX_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) define_extent(ccw++, (struct DE_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) fdata->start_unit, fdata->stop_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) data += sizeof(struct DE_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) locate_record(ccw++, (struct LO_eckd_data *) data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) fdata->start_unit, 0, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) DASD_ECKD_CCW_WRITE_CKD, base, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) data += sizeof(struct LO_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) for (j = 0; j < nr_tracks; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) /* calculate cylinder and head for the current track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) set_ch_t(&address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) (fdata->start_unit + j) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) base_priv->rdc_data.trk_per_cyl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) (fdata->start_unit + j) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) base_priv->rdc_data.trk_per_cyl);
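/* e.g. with 15 tracks per cylinder (3390), track 31 is cylinder 2, head 1 */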
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) if (intensity & 0x01) { /* write record zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) ect = (struct eckd_count *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) data += sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) ect->cyl = address.cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) ect->head = address.head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) ect->record = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) ect->kl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) ect->dl = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) ccw->count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) ccw->cda = (__u32)(addr_t) ect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if ((intensity & ~0x08) & 0x04) { /* erase track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) ect = (struct eckd_count *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) data += sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) ect->cyl = address.cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) ect->head = address.head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) ect->record = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) ect->kl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) ect->dl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) ccw->count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) ccw->cda = (__u32)(addr_t) ect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) } else { /* write remaining records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) for (i = 0; i < rpt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) ect = (struct eckd_count *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) data += sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) ect->cyl = address.cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) ect->head = address.head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) ect->record = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) ect->kl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) ect->dl = fdata->blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) * Check for special tracks 0-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) * when formatting CDL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if ((intensity & 0x08) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) address.cyl == 0 && address.head == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (i < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) ect->kl = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) ect->dl = sizes_trk0[i] - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) if ((intensity & 0x08) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) address.cyl == 0 && address.head == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) ect->kl = 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) ect->dl = LABEL_SIZE - 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) if (i != 0 || j == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) ccw->cmd_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) DASD_ECKD_CCW_WRITE_CKD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ccw->cmd_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) DASD_ECKD_CCW_WRITE_CKD_MT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) ccw->count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) ccw->cda = (__u32)(addr_t) ect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) fcp->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) fcp->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) fcp->basedev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) fcp->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) fcp->expires = startdev->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) fcp->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) fcp->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) return fcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * Wrapper function to build a CCW request depending on input data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) dasd_eckd_format_build_ccw_req(struct dasd_device *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) struct format_data_t *fdata, int enable_pav,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) int tpm, struct eckd_count *fmt_buffer, int rpt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) struct dasd_ccw_req *ccw_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (!fmt_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (tpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) ccw_req = dasd_eckd_build_check_tcw(base, fdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) enable_pav,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) fmt_buffer, rpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) fmt_buffer, rpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return ccw_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * Sanity checks on format_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) struct format_data_t *fdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) struct dasd_eckd_private *private = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) if (fdata->start_unit >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) (private->real_cyl * private->rdc_data.trk_per_cyl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) dev_warn(&base->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) "Start track number %u used in formatting is too big\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) fdata->start_unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (fdata->stop_unit >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) (private->real_cyl * private->rdc_data.trk_per_cyl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) dev_warn(&base->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) "Stop track number %u used in formatting is too big\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) fdata->stop_unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) if (fdata->start_unit > fdata->stop_unit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) dev_warn(&base->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) "Start track %u used in formatting exceeds end track\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) fdata->start_unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (dasd_check_blocksize(fdata->blksize) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) dev_warn(&base->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) "The DASD cannot be formatted with block size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) fdata->blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) * This function will process format_data originally coming from an IOCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) static int dasd_eckd_format_process_data(struct dasd_device *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) struct format_data_t *fdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) int enable_pav, int tpm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) struct eckd_count *fmt_buffer, int rpt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) struct dasd_eckd_private *private = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) struct dasd_ccw_req *cqr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) struct list_head format_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) char *sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) int old_start, old_stop, format_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) int step, retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) rc = dasd_eckd_format_sanity_checks(base, fdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) INIT_LIST_HEAD(&format_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) old_start = fdata->start_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) old_stop = fdata->stop_unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) if (!tpm && fmt_buffer != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) /* Command Mode / Format Check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) format_step = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) } else if (tpm && fmt_buffer != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) /* Transport Mode / Format Check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) format_step = DASD_CQR_MAX_CCW / rpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) /* Normal Formatting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) format_step = DASD_CQR_MAX_CCW /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) recs_per_track(&private->rdc_data, 0, fdata->blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
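/*
 * format_step is the number of tracks a single request may cover so that
 * it stays within DASD_CQR_MAX_CCW channel commands (or TIDAWs in
 * transport mode); the command mode format check handles one track at a
 * time
 */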
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) while (fdata->start_unit <= old_stop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) step = fdata->stop_unit - fdata->start_unit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) if (step > format_step) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) fdata->stop_unit =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) fdata->start_unit + format_step - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) cqr = dasd_eckd_format_build_ccw_req(base, fdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) enable_pav, tpm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) fmt_buffer, rpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) rc = PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (rc == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) if (list_empty(&format_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * not enough memory available; start the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * requests built so far and retry after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) * they have finished
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) list_add_tail(&cqr->blocklist, &format_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if (fmt_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) step = fdata->stop_unit - fdata->start_unit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) fmt_buffer += rpt * step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) fdata->start_unit = fdata->stop_unit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) fdata->stop_unit = old_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) rc = dasd_sleep_on_queue(&format_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (cqr->status == DASD_CQR_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) * Only get sense data if called by format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) * check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) if (fmt_buffer && irb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) sense = dasd_get_sense(&cqr->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) memcpy(irb, &cqr->irb, sizeof(*irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) list_del_init(&cqr->blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) dasd_ffree_request(cqr, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) private->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (rc && rc != -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if (rc == -EIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) * In case fewer than the expected records are on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) * track, we will most likely get a 'No Record Found'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) * error (in command mode) or a 'File Protected' error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) * (in transport mode). Those particular cases shouldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) * pass the -EIO to the IOCTL, therefore reset the rc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) * and continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) if (sense &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) (sense[1] & SNS1_NO_REC_FOUND ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) sense[1] & SNS1_FILE_PROTECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) } while (retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) fdata->start_unit = old_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) fdata->stop_unit = old_stop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static int dasd_eckd_format_device(struct dasd_device *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) struct format_data_t *fdata, int enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) static bool test_and_set_format_track(struct dasd_format_entry *to_format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) struct dasd_format_entry *format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) bool rc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) spin_lock_irqsave(&block->format_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) list_for_each_entry(format, &block->format_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if (format->track == to_format->track) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) rc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) list_add_tail(&to_format->list, &block->format_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) spin_unlock_irqrestore(&block->format_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) static void clear_format_track(struct dasd_format_entry *format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) spin_lock_irqsave(&block->format_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) list_del_init(&format->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) spin_unlock_irqrestore(&block->format_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) * Callback function to free ESE format requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) struct dasd_device *device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) struct dasd_format_entry *format = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) clear_format_track(format, cqr->basedev->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) private->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) dasd_ffree_request(cqr, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) struct dasd_format_entry *format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) struct format_data_t fdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) unsigned int recs_per_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct dasd_ccw_req *fcqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct dasd_device *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) unsigned int blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) sector_t first_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) sector_t last_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) sector_t curr_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) req = cqr->callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) block = cqr->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) base = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) private = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) blksize = block->bp_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) format = &startdev->format_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
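/*
 * blk_rq_pos() and blk_rq_sectors() are given in 512 byte sectors;
 * shifting by s2b_shift converts them to block numbers and dividing by
 * the records per track yields the first and last track of the request
 */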
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) first_trk = blk_rq_pos(req) >> block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) sector_div(first_trk, recs_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) last_trk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) sector_div(last_trk, recs_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (curr_trk < first_trk || curr_trk > last_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) DBF_DEV_EVENT(DBF_WARNING, startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) "ESE error track %llu not within range %llu - %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) curr_trk, first_trk, last_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) format->track = curr_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) /* test if the track is already being formatted by another thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (test_and_set_format_track(format, block))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return ERR_PTR(-EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
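/*
 * format only the track that raised the error, keeping the volume's
 * block size and disk layout
 */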
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) fdata.start_unit = curr_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) fdata.stop_unit = curr_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) fdata.blksize = blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) rc = dasd_eckd_format_sanity_checks(base, &fdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) * We're building the request with PAV disabled as we're reusing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) * the former startdev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (IS_ERR(fcqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) return fcqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) fcqr->callback = dasd_eckd_ese_format_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) fcqr->callback_data = (void *) format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return fcqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) * When data is read from an unformatted area of an ESE volume, this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) * returns zeroed data and thereby mimics a read of zero data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) * The first unformatted track is the one that got the NRF error; its address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) * is encoded in the sense data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) * All tracks before have returned valid data and should not be touched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) * All tracks after the unformatted track might or might not be formatted. Since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) * this is currently not known, remember the amount of data already processed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) * return the remainder of the request to the block layer in __dasd_cleanup_cqr().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) sector_t first_trk, last_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) sector_t first_blk, last_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) unsigned int blksize, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) unsigned int recs_per_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) struct dasd_device *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) struct dasd_block *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) unsigned int skip_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) unsigned int blk_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) struct request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) sector_t curr_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) sector_t end_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) char *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) req = (struct request *) cqr->callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) base = cqr->block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) blksize = base->block->bp_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) block = cqr->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) private = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) skip_block = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) blk_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
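/*
 * Determine the first and last track spanned by the request; the error
 * track reported in the sense data is validated against this range below.
 */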
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) sector_div(first_trk, recs_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) last_trk = last_blk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) sector_div(last_trk, recs_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) /* sanity check if the current track from sense data is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) if (curr_trk < first_trk || curr_trk > last_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) DBF_DEV_EVENT(DBF_WARNING, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) "ESE error track %llu not within range %llu - %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) curr_trk, first_trk, last_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) * If the NRF error did not occur on the first track of the request, we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * to skip over the valid blocks that precede the unformatted track.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) if (curr_trk != first_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) skip_block = curr_trk * recs_per_trk - first_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) /* we have no information beyond the current track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) end_blk = (curr_trk + 1) * recs_per_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
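/*
 * Walk through the request data: skip blocks that were already read
 * successfully from formatted tracks, zero the blocks that fall on the
 * unformatted track, and stop once the end of that track is reached so
 * the remainder of the request can be handed back to the block layer.
 */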
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) for (off = 0; off < bv.bv_len; off += blksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (first_blk + blk_count >= end_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) cqr->proc_bytes = blk_count * blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) if (dst && !skip_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) dst += off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) memset(dst, 0, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) skip_block--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) blk_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) * Helper function to count consecutive records of a single track.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) int max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) int head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) head = fmt_buffer[start].head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) * There are 3 conditions where we stop counting:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) * - if the data repeats, i.e. the record number wraps back to 1 on the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) * head, which may happen due to the way DASD_ECKD_CCW_READ_COUNT works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) * - when the head changes, because we're iterating over several tracks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) * then (DASD_ECKD_CCW_READ_COUNT_MT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) * - when we've reached the end of sensible data in the buffer (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) * record will be 0 then)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) for (i = start; i < max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (i > start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) if ((fmt_buffer[i].head == head &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) fmt_buffer[i].record == 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) fmt_buffer[i].head != head ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) fmt_buffer[i].record == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) return i - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) * Evaluate a given range of tracks. Data like number of records, blocksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) * record ids, and key length are compared with expected data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) * If a mismatch occurs, the corresponding error bit is set, as well as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) * additional information, depending on the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) struct format_check_t *cdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) int rpt_max, int rpt_exp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) int trk_per_cyl, int tpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) struct ch_t geo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) int max_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) int trkcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) int blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) int pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) int kl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) max_entries = trkcount * rpt_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) /* Calculate the correct next starting position in the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (tpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) while (fmt_buffer[pos].record == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) fmt_buffer[pos].dl == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) if (pos++ > max_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (i != cdata->expect.start_unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) pos += rpt_max - count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) /* Calculate the expected geo values for the current track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) /* Count and check number of records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (count < rpt_exp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) if (count > rpt_exp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) for (j = 0; j < count; j++, pos++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) blksize = cdata->expect.blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) kl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) * Set special values when checking CDL formatted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) * devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if ((cdata->expect.intensity & 0x08) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) geo.cyl == 0 && geo.head == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (j < 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) blksize = sizes_trk0[j] - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) kl = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if ((cdata->expect.intensity & 0x08) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) geo.cyl == 0 && geo.head == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) blksize = LABEL_SIZE - 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) kl = 44;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) /* Check blocksize */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) if (fmt_buffer[pos].dl != blksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) cdata->result = DASD_FMT_ERR_BLKSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) /* Check if key length is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) if (fmt_buffer[pos].kl != kl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) cdata->result = DASD_FMT_ERR_KEY_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) /* Check if record_id is correct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) if (fmt_buffer[pos].cyl != geo.cyl ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) fmt_buffer[pos].head != geo.head ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) fmt_buffer[pos].record != (j + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) cdata->result = DASD_FMT_ERR_RECORD_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) * In case of no errors, we need to decrease by one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * to get the correct positions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (!cdata->result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) pos--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) cdata->unit = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) cdata->num_records = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) cdata->rec = fmt_buffer[pos].record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) cdata->blksize = fmt_buffer[pos].dl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) cdata->key_length = fmt_buffer[pos].kl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) * Check the format of a range of tracks of a DASD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) static int dasd_eckd_check_device_format(struct dasd_device *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) struct format_check_t *cdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) int enable_pav)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) struct dasd_eckd_private *private = base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) struct eckd_count *fmt_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) struct irb irb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) int rpt_max, rpt_exp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) int fmt_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) int trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) int trkcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) int tpm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) trk_per_cyl = private->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) /* Get the maximum and the expected number of records per track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) if (!fmt_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) * A certain FICON feature subset is needed to operate in transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) * mode. Additionally, the support for transport mode is implicitly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) * checked by comparing the buffer size with fcx_max_data. As long as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) * the buffer size is smaller, we can operate in transport mode and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) * process multiple tracks. Otherwise, only one track at a time is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * processed using command mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) if ((private->features.feature[40] & 0x04) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) fmt_buffer_size <= private->fcx_max_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) tpm = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) tpm, fmt_buffer, rpt_max, &irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) if (rc && rc != -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) if (rc == -EIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) * If our first attempt with transport mode enabled comes back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) * with an incorrect length error, we're going to retry the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) * check with command mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) tpm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) rc = dasd_eckd_format_process_data(base, &cdata->expect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) enable_pav, tpm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) fmt_buffer, rpt_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) &irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
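/*
 * At this point fmt_buffer holds the read count data for all requested
 * tracks; compare it against the expected format.
 */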
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) trk_per_cyl, tpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) kfree(fmt_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
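/*
 * Prepare a terminated request to be started again. Give up once the retry
 * counter is exhausted; otherwise reset the request and, if it was running
 * on an alias device, rebuild the channel program for and redirect it to
 * the base device.
 */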
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) if (cqr->retries < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) cqr->status = DASD_CQR_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (cqr->block && (cqr->startdev != cqr->block->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) dasd_eckd_reset_ccw_to_base_io(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) cqr->startdev = cqr->block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) cqr->lpm = dasd_path_get_opm(cqr->block->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) static dasd_erp_fn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) struct dasd_device *device = (struct dasd_device *) cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) struct ccw_device *cdev = device->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
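/*
 * Control units that support the 3990 error recovery (3990, 2105, 2107,
 * 1750) use it; all others fall back to the default ERP action.
 */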
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) switch (cdev->id.cu_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) case 0x3990:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) case 0x2105:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) case 0x2107:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) case 0x1750:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) return dasd_3990_erp_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) case 0x9343:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) case 0x3880:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) return dasd_default_erp_action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static dasd_erp_fn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) return dasd_default_erp_postaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) static void dasd_eckd_check_for_device_change(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) struct dasd_ccw_req *cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) char mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) char *sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) /* first of all check for state change pending interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if ((scsw_dstat(&irb->scsw) & mask) == mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) * for alias only, not in offline processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) * and only if not suspended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (!device->block && private->lcu &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) device->state == DASD_STATE_ONLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /* schedule worker to reload device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) dasd_reload_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) dasd_generic_handle_state_change(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) sense = dasd_get_sense(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) if (!sense)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) /* summary unit check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) "eckd suc: device already notified");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) sense = dasd_get_sense(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) if (!sense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) "eckd suc: no reason code available");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) clear_bit(DASD_FLAG_SUC, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) private->suc_reason = sense[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) "eckd handle summary unit check: reason",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) private->suc_reason);
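/*
 * Hold a device reference for the summary unit check worker; drop it
 * again if the work was already queued.
 */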
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) if (!schedule_work(&device->suc_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) /* service information message SIM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) dasd_3990_erp_handle_sim(device, sense);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) /* Loss of device reservation is handled via base devices only,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) * as alias devices may be used with several bases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) (sense[7] == 0x3F) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) if (device->features & DASD_FEATURE_FAILONSLCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) "The device reservation was lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) unsigned int first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) unsigned int last_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) unsigned int trks_per_vol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (first_trk >= trks_per_vol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) "Start track number %u used in the space release command is too big\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) first_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) } else if (last_trk >= trks_per_vol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) "Stop track number %u used in the space release command is too big\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) last_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) } else if (first_trk > last_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) "Start track %u used in the space release command exceeds the end track\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) first_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) * Helper function to count the number of extents involved in a given range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) * with extent alignment in mind.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) */
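/*
 * For illustration (not part of the driver logic): with 30 tracks per
 * extent and a range from track 25 to track 95, this yields the partial
 * extent 25-29, the full extents 30-59 and 60-89, and the partial extent
 * 90-95, i.e. four extents in total.
 */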
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) int cur_pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) if (from == to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) /* Count first partial extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) if (from % trks_per_ext != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) if (tmp > to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) tmp = to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) cur_pos = tmp - from + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) /* Count full extents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) if (to - (from + cur_pos) + 1 >= trks_per_ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) cur_pos = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) /* Count last partial extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) if (cur_pos < to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) * Release allocated space for a given range or an entire volume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) static struct dasd_ccw_req *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) struct request *req, unsigned int first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) unsigned int last_trk, int by_extent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) struct dasd_dso_ras_ext_range *ras_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) struct dasd_rssd_features *features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) struct dasd_dso_ras_data *ras_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) u16 heads, beg_head, end_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) int cur_to_trk, cur_from_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) u32 beg_cyl, end_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) int trks_per_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) size_t ras_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) int nr_exts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) void *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
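/*
 * If this release was triggered by a block layer request, reuse the
 * per-request memory (PDU) for the cqr; otherwise dasd_smalloc_request()
 * allocates one itself.
 */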
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) rq = req ? blk_mq_rq_to_pdu(req) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) features = &private->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) nr_exts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) if (by_extent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) ras_size = sizeof(*ras_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) size = ras_size + (nr_exts * sizeof(*ras_range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) "Could not allocate RAS request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) ras_data = cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) memset(ras_data, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) ras_data->order = DSO_ORDER_RAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) ras_data->flags.vol_type = 0; /* CKD volume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) /* Release specified extents or entire volume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) ras_data->op_flags.by_extent = by_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) * This bit guarantees initialisation of tracks within an extent that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) * not fully specified, but is only supported with a certain feature
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) * subset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) ras_data->lss = private->ned->ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) ras_data->dev_addr = private->ned->unit_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) ras_data->nr_exts = nr_exts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687)
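/*
 * When releasing by extent, describe each extent as a begin/end range in
 * cylinder/head notation. The first and last extent of the range may be
 * partial; all others cover a full extent each.
 */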
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (by_extent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) heads = private->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) cur_from_trk = first_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) cur_to_trk = first_trk + trks_per_ext -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) (first_trk % trks_per_ext) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) if (cur_to_trk > last_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) cur_to_trk = last_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) for (i = 0; i < nr_exts; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) beg_cyl = cur_from_trk / heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) beg_head = cur_from_trk % heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) end_cyl = cur_to_trk / heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) end_head = cur_to_trk % heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) set_ch_t(&ras_range->end_ext, end_cyl, end_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) cur_from_trk = cur_to_trk + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) cur_to_trk = cur_from_trk + trks_per_ext - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) if (cur_to_trk > last_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) cur_to_trk = last_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) ras_range++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) ccw->cda = (__u32)(addr_t)cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) ccw->cmd_code = DASD_ECKD_CCW_DSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) ccw->count = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) cqr->block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) cqr->expires = device->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) static int dasd_eckd_release_space_full(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) rc = dasd_sleep_on_interruptible(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) static int dasd_eckd_release_space_trks(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) unsigned int from, unsigned int to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) struct dasd_block *block = device->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) struct dasd_ccw_req *cqr, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) struct list_head ras_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) unsigned int device_exts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) int trks_per_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) int stop, step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) int cur_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) int retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) INIT_LIST_HEAD(&ras_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) device_exts = private->real_cyl / dasd_eckd_ext_size(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) /* Make sure device limits are not exceeded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) cur_pos = from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
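/*
 * Build and submit release requests in chunks of at most step tracks.
 * If a request allocation fails with -ENOMEM, submit what has been
 * queued so far, clean up, and retry the remaining range.
 */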
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) while (cur_pos < to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) stop = cur_pos + step -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) ((cur_pos + step) % trks_per_ext) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) if (stop > to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) stop = to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) rc = PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) if (rc == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) if (list_empty(&ras_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) spin_lock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) list_add_tail(&cqr->blocklist, &ras_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) spin_unlock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) cur_pos = stop + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) rc = dasd_sleep_on_queue_interruptible(&ras_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) device = cqr->startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) spin_lock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) list_del_init(&cqr->blocklist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) spin_unlock_irq(&block->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) dasd_sfree_request(cqr, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) private->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) } while (retry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) static int dasd_eckd_release_space(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) struct format_data_t *rdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) return dasd_eckd_release_space_full(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) else if (rdata->intensity == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) return dasd_eckd_release_space_trks(device, rdata->start_unit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) rdata->stop_unit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) sector_t first_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) sector_t last_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) sector_t first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) sector_t last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) unsigned int first_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) unsigned int last_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) unsigned int blk_per_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) unsigned int blksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) unsigned long *idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) struct LO_eckd_data *LO_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) char *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) unsigned int off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) int count, cidaw, cplength, datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) sector_t recid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) unsigned char cmd, rcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) int use_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) struct dasd_device *basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) basedev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) private = basedev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) if (rq_data_dir(req) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) cmd = DASD_ECKD_CCW_READ_MT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) else if (rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) cmd = DASD_ECKD_CCW_WRITE_MT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) /* Check struct bio and count the number of blocks for the request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) cidaw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) if (bv.bv_len & (blksize - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) /* Eckd can only do full blocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) count += bv.bv_len >> (block->s2b_shift + 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) cidaw += bv.bv_len >> (block->s2b_shift + 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) /* Paranoia. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) if (count != last_rec - first_rec + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) /* use the prefix command if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) use_prefix = private->features.feature[8] & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) /* 1x prefix + number of blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) cplength = 2 + count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) /* 1x prefix + cidaws*sizeof(long) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) datasize = sizeof(struct PFX_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) cidaw * sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) /* 1x define extent + 1x locate record + number of blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) cplength = 2 + count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) datasize = sizeof(struct DE_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) sizeof(struct LO_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) cidaw * sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) /* Find out the number of additional locate record ccws for cdl. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) if (private->uses_cdl && first_rec < 2*blk_per_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) if (last_rec >= 2*blk_per_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) count = 2*blk_per_trk - first_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) cplength += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) datasize += count*sizeof(struct LO_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) /* Allocate the ccw request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) startdev, blk_mq_rq_to_pdu(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) /* First ccw is define extent or prefix. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) if (prefix(ccw++, cqr->data, first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) last_trk, cmd, basedev, startdev) == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) /* Clock not in sync and XRC is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) * Try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) idaws = (unsigned long *) (cqr->data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) sizeof(struct PFX_eckd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (define_extent(ccw++, cqr->data, first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) last_trk, cmd, basedev, 0) == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) /* Clock not in sync and XRC is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) * Try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) idaws = (unsigned long *) (cqr->data +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) sizeof(struct DE_eckd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) /* Build locate_record + read/write CCWs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) LO_data = (struct LO_eckd_data *) (idaws + cidaw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) recid = first_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) /* Only standard blocks so there is just one locate record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) last_rec - recid + 1, cmd, basedev, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) if (dasd_page_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) char *copy = kmem_cache_alloc(dasd_page_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) GFP_DMA | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) if (copy && rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) memcpy(copy + bv.bv_offset, dst, bv.bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) if (copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) dst = copy + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) for (off = 0; off < bv.bv_len; off += blksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) sector_t trkid = recid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) unsigned int recoffs = sector_div(trkid, blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) rcmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) count = blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) /* Locate record for cdl special block? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) if (private->uses_cdl && recid < 2*blk_per_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) if (dasd_eckd_cdl_special(blk_per_trk, recid)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) rcmd |= 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) count = dasd_eckd_cdl_reclen(recid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (count < blksize &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) rq_data_dir(req) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) memset(dst + count, 0xe5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) blksize - count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) locate_record(ccw++, LO_data++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) trkid, recoffs + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 1, rcmd, basedev, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) /* Locate record for standard blocks? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) if (private->uses_cdl && recid == 2*blk_per_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) locate_record(ccw++, LO_data++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) trkid, recoffs + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) last_rec - recid + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) cmd, basedev, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) /* Read/write ccw. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) ccw->cmd_code = rcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) ccw->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) if (idal_is_needed(dst, blksize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) ccw->cda = (__u32)(addr_t) idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) ccw->flags = CCW_FLAG_IDA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) idaws = idal_create_words(idaws, dst, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) ccw->cda = (__u32)(addr_t) dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) ccw->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) dst += blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) recid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) if (blk_noretry_request(req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) block->base->features & DASD_FEATURE_FAILFAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) cqr->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) cqr->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) cqr->block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) cqr->lpm = dasd_path_get_ppm(startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) cqr->retries = startdev->default_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) /* Set flags to suppress output for expected errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) if (dasd_eckd_is_ese(basedev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) sector_t first_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) sector_t last_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) sector_t first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) sector_t last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) unsigned int first_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) unsigned int last_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) unsigned int blk_per_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) unsigned int blksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) unsigned long *idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) char *dst, *idaw_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) unsigned int cidaw, cplength, datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) unsigned int tlf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) sector_t recid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) unsigned char cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) struct dasd_device *basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) unsigned int trkcount, count, count_to_trk_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) unsigned int idaw_len, seg_len, part_len, len_to_track_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) unsigned char new_track, end_idaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) sector_t trkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) unsigned int recoffs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) basedev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) if (rq_data_dir(req) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) else if (rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) /* Track-based I/O needs IDAWs for each page, and not just for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) * 64 bit addresses. We need additional idals for pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) * that get filled from two tracks, so we use the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) * of records as an upper limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) */
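/*
 * Illustration only (hypothetical numbers): a request covering
 * records 5..20 reserves 20 - 5 + 1 = 16 idaws; a page whose data
 * straddles a track boundary ends one idaw and starts a new one,
 * which is why the record count, not the page count, bounds the
 * number of idaws needed.
 */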
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) cidaw = last_rec - first_rec + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) trkcount = last_trk - first_trk + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) /* 1x prefix + one read/write ccw per track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) cplength = 1 + trkcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) /* Allocate the ccw request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) startdev, blk_mq_rq_to_pdu(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) /* transfer length factor: how many bytes to read from the last track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) if (first_trk == last_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) tlf = last_offs - first_offs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) tlf = last_offs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) tlf *= blksize;
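/*
 * Illustration only (hypothetical numbers): with blksize 4096,
 * first_offs 2 and last_offs 5 on a single track, tlf becomes
 * (5 - 2 + 1) * 4096 = 16384; for a multi-track request with
 * last_offs 5, tlf becomes (5 + 1) * 4096 = 24576 bytes read
 * from the last track.
 */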
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) if (prefix_LRE(ccw++, cqr->data, first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) last_trk, cmd, basedev, startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 1 /* format */, first_offs + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) trkcount, blksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) tlf) == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) /* Clock not in sync and XRC is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) * Try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) * The translation of the request into ccw programs must meet the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) * following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) * - all idaws but the first and the last must address full pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) * (or 2K blocks on 31-bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) * - the scope of a ccw and its idal ends at the track boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) */
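/*
 * Illustration only: a request spanning two tracks gets one
 * read/write ccw per track, each pointing to its own idal; the
 * idaw list of a ccw is cut at the track end even if the current
 * bio segment still has data left (see end_idaw handling below).
 */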
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) recid = first_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) new_track = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) end_idaw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) len_to_track_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) idaw_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) idaw_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) seg_len = bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) while (seg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) if (new_track) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) trkid = recid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) recoffs = sector_div(trkid, blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) count_to_trk_end = blk_per_trk - recoffs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) count = min((last_rec - recid + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) (sector_t)count_to_trk_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) len_to_track_end = count * blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) ccw->cmd_code = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) ccw->count = len_to_track_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) ccw->cda = (__u32)(addr_t)idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) ccw->flags = CCW_FLAG_IDA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) recid += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) new_track = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) /* first idaw for a ccw may start anywhere */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) if (!idaw_dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) idaw_dst = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) /* If we start a new idaw, we must make sure that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) * starts on an IDA_BLOCK_SIZE boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) * If we continue an idaw, we must make sure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) * current segment begins where the idaw accumulated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) * so far ends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) */
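/*
 * Illustration only (assuming a 4k IDA_BLOCK_SIZE, 2k on 31-bit):
 * a new idaw may only start at a dst whose physical address is
 * IDA_BLOCK_SIZE aligned, and a continued idaw requires
 * dst == idaw_dst + idaw_len; otherwise the request is rejected
 * with -ERANGE below.
 */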
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (!idaw_dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) return ERR_PTR(-ERANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) idaw_dst = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) if ((idaw_dst + idaw_len) != dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) return ERR_PTR(-ERANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) part_len = min(seg_len, len_to_track_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) seg_len -= part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) dst += part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) idaw_len += part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) len_to_track_end -= part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) /* If the collected memory area ends on an IDA_BLOCK border,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) * create an idaw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) * idal_create_words will handle cases where idaw_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) * is larger than IDA_BLOCK_SIZE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) end_idaw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) /* We also need to end the idaw at track end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (!len_to_track_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) new_track = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) end_idaw = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) if (end_idaw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) idaws = idal_create_words(idaws, idaw_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) idaw_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) idaw_dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) idaw_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) end_idaw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) if (blk_noretry_request(req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) block->base->features & DASD_FEATURE_FAILFAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) cqr->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) cqr->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) cqr->block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) cqr->lpm = dasd_path_get_ppm(startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) cqr->retries = startdev->default_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) /* Set flags to suppress output for expected errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) if (dasd_eckd_is_ese(basedev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) static int prepare_itcw(struct itcw *itcw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) unsigned int trk, unsigned int totrk, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) struct dasd_device *basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) unsigned int rec_on_trk, int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) unsigned int blksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) unsigned int total_data_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) unsigned int tlf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) unsigned int blk_per_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) struct PFX_eckd_data pfxdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) struct dasd_eckd_private *basepriv, *startpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) struct DE_eckd_data *dedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) struct LRE_eckd_data *lredata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) struct dcw *dcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) u32 begcyl, endcyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) u16 heads, beghead, endhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) u8 pfx_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) int sector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) int dn, d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) /* setup prefix data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) basepriv = basedev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) startpriv = startdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) dedata = &pfxdata.define_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) lredata = &pfxdata.locate_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) memset(&pfxdata, 0, sizeof(pfxdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) pfxdata.format = 1; /* PFX with LRE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) pfxdata.base_address = basepriv->ned->unit_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) pfxdata.base_lss = basepriv->ned->ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) pfxdata.validity.define_extent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) /* private uid is kept up to date, conf_data may be outdated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) pfxdata.validity.verify_base = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) pfxdata.validity.verify_base = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) pfxdata.validity.hyper_pav = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) case DASD_ECKD_CCW_READ_TRACK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) dedata->mask.perm = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) dedata->attributes.operation = basepriv->attrib.operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) dedata->blk_size = blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) dedata->ga_extended |= 0x42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) lredata->operation.orientation = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) lredata->operation.operation = 0x0C;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) lredata->auxiliary.check_bytes = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) pfx_cmd = DASD_ECKD_CCW_PFX_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) case DASD_ECKD_CCW_WRITE_TRACK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) dedata->mask.perm = 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) dedata->attributes.operation = basepriv->attrib.operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) dedata->blk_size = blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) rc = set_timestamp(NULL, dedata, basedev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) dedata->ga_extended |= 0x42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) lredata->operation.orientation = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) lredata->operation.operation = 0x3F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) lredata->extended_operation = 0x23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) lredata->auxiliary.check_bytes = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) * If XRC is supported, the System Time Stamp is set. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) * validity of the time stamp must be reflected in the prefix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) * data as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) pfx_cmd = DASD_ECKD_CCW_PFX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) case DASD_ECKD_CCW_READ_COUNT_MT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) dedata->mask.perm = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) dedata->attributes.operation = DASD_BYPASS_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) dedata->ga_extended |= 0x42;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) dedata->blk_size = blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) lredata->operation.orientation = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) lredata->operation.operation = 0x16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) lredata->auxiliary.check_bytes = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) pfx_cmd = DASD_ECKD_CCW_PFX_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) "prepare itcw, unknown opcode 0x%x", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) dedata->attributes.mode = 0x3; /* ECKD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) heads = basepriv->rdc_data.trk_per_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) begcyl = trk / heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) beghead = trk % heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) endcyl = totrk / heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) endhead = totrk % heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) /* check for sequential prestage - enhance cylinder range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) dedata->attributes.operation == DASD_SEQ_ACCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) endcyl += basepriv->attrib.nr_cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) endcyl = (basepriv->real_cyl - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) set_ch_t(&dedata->beg_ext, begcyl, beghead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) set_ch_t(&dedata->end_ext, endcyl, endhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) dedata->ep_format = 0x20; /* records per track is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) dedata->ep_rec_per_track = blk_per_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) if (rec_on_trk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) switch (basepriv->rdc_data.dev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) case 0x3390:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) dn = ceil_quot(blksize + 6, 232);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) case 0x3380:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) d = 7 + ceil_quot(blksize + 12, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) }
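/*
 * Illustration only (hypothetical numbers): for a 3390 with
 * blksize 4096 and rec_on_trk 1 this gives dn = 18, d = 133 and
 * sector = (49 + 0 * 143) / 8 = 6; rec_on_trk 2 would give
 * sector = (49 + 143) / 8 = 24.
 */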
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) lredata->auxiliary.length_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) lredata->auxiliary.length_scope = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) lredata->sector = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) lredata->auxiliary.length_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) lredata->auxiliary.length_scope = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) lredata->sector = sector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) lredata->auxiliary.imbedded_ccw_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) lredata->length = tlf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) lredata->imbedded_ccw = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) lredata->count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) set_ch_t(&lredata->seek_addr, begcyl, beghead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) lredata->search_arg.cyl = lredata->seek_addr.cyl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) lredata->search_arg.head = lredata->seek_addr.head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) lredata->search_arg.record = rec_on_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) &pfxdata, sizeof(pfxdata), total_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) return PTR_ERR_OR_ZERO(dcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) struct request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) sector_t first_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) sector_t last_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) sector_t first_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) sector_t last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) unsigned int first_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) unsigned int last_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) unsigned int blk_per_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) unsigned int blksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) char *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) unsigned int trkcount, ctidaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) unsigned char cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) struct dasd_device *basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) unsigned int tlf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) struct itcw *itcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) struct tidaw *last_tidaw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) int itcw_op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) size_t itcw_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) u8 tidaw_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) unsigned int seg_len, part_len, len_to_track_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) unsigned char new_track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) sector_t recid, trkid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) unsigned int offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) unsigned int count, count_to_trk_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) basedev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) if (rq_data_dir(req) == READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) itcw_op = ITCW_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) } else if (rq_data_dir(req) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) itcw_op = ITCW_OP_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) /* Track-based I/O needs to address all memory via TIDAWs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) * not just for 64 bit addresses. This allows us to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) * each segment directly to one tidaw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) * In the case of write requests, additional tidaws may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) * be needed when a segment crosses a track boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) trkcount = last_trk - first_trk + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) ctidaw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) ++ctidaw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) if (rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) ctidaw += (last_trk - first_trk);
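/*
 * Illustration only (hypothetical numbers): a write covering
 * tracks 10..12 built from 5 bio segments reserves
 * 5 + (12 - 10) = 7 tidaws.
 */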
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) /* Allocate the ccw request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) itcw_size = itcw_calc_size(0, ctidaw, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) blk_mq_rq_to_pdu(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) /* transfer length factor: how many bytes to read from the last track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) if (first_trk == last_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) tlf = last_offs - first_offs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) tlf = last_offs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) tlf *= blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) if (IS_ERR(itcw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) cqr->cpaddr = itcw_get_tcw(itcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) if (prepare_itcw(itcw, first_trk, last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) cmd, basedev, startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) first_offs + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) trkcount, blksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) (last_rec - first_rec + 1) * blksize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) tlf, blk_per_trk) == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) /* Clock not in sync and XRC is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) * Try again later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) len_to_track_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) * A tidaw can address 4k of memory, but must not cross page boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) * We can let the block layer handle this by setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) * blk_queue_segment_boundary to page boundaries and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) * blk_max_segment_size to page size when setting up the request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) * For write requests, a TIDAW must not cross track boundaries, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) * we have to set the CBC flag on the last tidaw for each track.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) */
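/*
 * Illustration only: for a write, the last tidaw of each track gets
 * TIDAW_FLAGS_INSERT_CBC in the loop below, and the data of the
 * next track starts a new tidaw even if it comes from the same
 * bio segment.
 */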
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) if (rq_data_dir(req) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) new_track = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) recid = first_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) seg_len = bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) while (seg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) if (new_track) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) trkid = recid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) offs = sector_div(trkid, blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) count_to_trk_end = blk_per_trk - offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) count = min((last_rec - recid + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) (sector_t)count_to_trk_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) len_to_track_end = count * blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) recid += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) new_track = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) part_len = min(seg_len, len_to_track_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) seg_len -= part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) len_to_track_end -= part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) /* We need to end the tidaw at track end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) if (!len_to_track_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) new_track = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) tidaw_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) dst, part_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) if (IS_ERR(last_tidaw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) dst += part_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) last_tidaw = itcw_add_tidaw(itcw, 0x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) dst, bv.bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) if (IS_ERR(last_tidaw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) last_tidaw->flags |= TIDAW_FLAGS_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) itcw_finalize(itcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (blk_noretry_request(req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) block->base->features & DASD_FEATURE_FAILFAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) cqr->cpmode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) cqr->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) cqr->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) cqr->block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) cqr->lpm = dasd_path_get_ppm(startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) cqr->retries = startdev->default_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) /* Set flags to suppress output for expected errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) if (dasd_eckd_is_ese(basedev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) dasd_sfree_request(cqr, startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) int cmdrtd, cmdwtd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) int use_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) int fcx_multitrack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) struct dasd_device *basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) sector_t first_rec, last_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) sector_t first_trk, last_trk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) unsigned int first_offs, last_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) unsigned int blk_per_trk, blksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) int cdlspecial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) unsigned int data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) basedev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) private = basedev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) /* Calculate number of blocks/records per track. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) blksize = block->bp_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) if (blk_per_trk == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) /* Calculate record id of first and last block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) first_offs = sector_div(first_trk, blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) last_rec = last_trk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) last_offs = sector_div(last_trk, blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) fcx_multitrack = private->features.feature[40] & 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) data_size = blk_rq_bytes(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) if (data_size % blksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) /* tpm write requests add CBC data on each track boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) if (rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) data_size += (last_trk - first_trk) * 4;
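/*
 * Illustration only (hypothetical numbers): a write spanning
 * tracks 7..9 adds (9 - 7) * 4 = 8 bytes of CBC data to data_size.
 */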
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) /* are read track data and write track data in command mode supported? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) cmdrtd = private->features.feature[9] & 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) cmdwtd = private->features.feature[12] & 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) use_prefix = private->features.feature[8] & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) cqr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) if (cdlspecial || dasd_page_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) /* do nothing, just fall through to the cmd mode single case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) } else if ((data_size <= private->fcx_max_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) && (fcx_multitrack || (first_trk == last_trk))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) first_rec, last_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) first_trk, last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) first_offs, last_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) blk_per_trk, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) (PTR_ERR(cqr) != -ENOMEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) cqr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) } else if (use_prefix &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) (((rq_data_dir(req) == READ) && cmdrtd) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) ((rq_data_dir(req) == WRITE) && cmdwtd))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) first_rec, last_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) first_trk, last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) first_offs, last_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) blk_per_trk, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) (PTR_ERR(cqr) != -ENOMEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) cqr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) if (!cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) first_rec, last_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) first_trk, last_trk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) first_offs, last_offs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) blk_per_trk, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) unsigned int seg_len, len_to_track_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) unsigned int cidaw, cplength, datasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) sector_t first_trk, last_trk, sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) struct dasd_eckd_private *base_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) struct dasd_device *basedev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) unsigned int first_offs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) unsigned int trkcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) unsigned long *idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) unsigned char cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) int use_prefix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) char *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) * raw track access needs to be a multiple of 64k and on a 64k boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) * For read requests we can fix an incorrect alignment by padding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) * the request with dummy pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) */
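/*
 * Illustration only (assuming DASD_RAW_SECTORS_PER_TRACK is 128):
 * a read starting at sector 100 for 200 sectors gets
 * start_padding_sectors = 100 and end_padding_sectors =
 * (128 - (300 % 128)) % 128 = 84, so dummy pages cover both
 * partial tracks.
 */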
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) DASD_RAW_SECTORS_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) DASD_RAW_SECTORS_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) basedev = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) if ((start_padding_sectors || end_padding_sectors) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) (rq_data_dir(req) == WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) DBF_DEV_EVENT(DBF_ERR, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) "raw write not track aligned (%llu,%llu) req %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) start_padding_sectors, end_padding_sectors, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) DASD_RAW_SECTORS_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) trkcount = last_trk - first_trk + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) first_offs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) if (rq_data_dir(req) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) cmd = DASD_ECKD_CCW_READ_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) else if (rq_data_dir(req) == WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) * Raw track-based I/O needs IDAWs for each page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) * and not just for 64 bit addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) * of extended parameter. This is needed for write full track.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) base_priv = basedev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) use_prefix = base_priv->features.feature[8] & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) cplength = 1 + trkcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) size = sizeof(struct PFX_eckd_data) + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) cplength = 2 + trkcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) size = sizeof(struct DE_eckd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) sizeof(struct LRE_eckd_data) + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) size = ALIGN(size, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) datasize = size + cidaw * sizeof(unsigned long);
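/*
 * Illustration only (hypothetical numbers, assuming an 8-byte
 * unsigned long): a prefix-capable device and trkcount 2 give
 * cplength = 3, cidaw = 2 * 16 = 32 and
 * datasize = ALIGN(sizeof(struct PFX_eckd_data) + 2, 8) + 32 * 8.
 */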
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) /* Allocate the ccw request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) datasize, startdev, blk_mq_rq_to_pdu(req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) data = cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) if (use_prefix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) startdev, 1, first_offs + 1, trkcount, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) data += sizeof(struct DE_eckd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) locate_record_ext(ccw++, data, first_trk, first_offs + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) trkcount, cmd, basedev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) idaws = (unsigned long *)(cqr->data + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) len_to_track_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) if (start_padding_sectors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) ccw->cmd_code = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) /* maximum 3390 track size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) ccw->count = 57326;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) /* 64k maps to one track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) len_to_track_end = 65536 - start_padding_sectors * 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) ccw->cda = (__u32)(addr_t)idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) ccw->flags |= CCW_FLAG_IDA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) seg_len = bv.bv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) if (cmd == DASD_ECKD_CCW_READ_TRACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) memset(dst, 0, seg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) if (!len_to_track_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) ccw[-1].flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) ccw->cmd_code = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) /* maximum 3390 track size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) ccw->count = 57326;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 			/* 64k maps to one track */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) len_to_track_end = 65536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) ccw->cda = (__u32)(addr_t)idaws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) ccw->flags |= CCW_FLAG_IDA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) len_to_track_end -= seg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) idaws = idal_create_words(idaws, dst, seg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) if (blk_noretry_request(req) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) block->base->features & DASD_FEATURE_FAILFAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) cqr->startdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) cqr->memdev = startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) cqr->block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) cqr->expires = startdev->default_expires * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) cqr->lpm = dasd_path_get_ppm(startdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) cqr->retries = startdev->default_retries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) }
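/*
 * Worked example for the raw-track padding arithmetic above (the values
 * are chosen for illustration only): each track is transferred as a
 * fixed 64 KiB buffer (128 * 512-byte sectors), while the data CCW count
 * of 57326 is the maximum payload of a 3390 track. If a request starts
 * 24 sectors into a track:
 *
 *	len_to_track_end = 65536 - 24 * 512 = 53248 bytes are left for
 *			   request data on that track;
 *	padding IDAWs	 = 24 / 8 = 3, i.e. three PAGE_SIZE chunks of
 *			   rawpadpage are mapped in front of the data.
 *
 * The same scheme is applied with end_padding_sectors after the last
 * request segment, so every track image handed to the hardware is a
 * complete 64 KiB.
 */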
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) struct req_iterator iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) struct bio_vec bv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) char *dst, *cda;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) unsigned int blksize, blk_per_trk, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) sector_t recid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) if (!dasd_page_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) private = cqr->block->base->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) blksize = cqr->block->bp_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) /* Skip over define extent & locate record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) rq_for_each_segment(bv, req, iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) dst = page_address(bv.bv_page) + bv.bv_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) for (off = 0; off < bv.bv_len; off += blksize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) /* Skip locate record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) if (private->uses_cdl && recid <= 2*blk_per_trk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) if (dst) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) if (ccw->flags & CCW_FLAG_IDA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) cda = *((char **)((addr_t) ccw->cda));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) cda = (char *)((addr_t) ccw->cda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) if (dst != cda) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) if (rq_data_dir(req) == READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) memcpy(dst, cda, bv.bv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) kmem_cache_free(dasd_page_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) (void *)((addr_t)cda & PAGE_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) dst = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) recid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) status = cqr->status == DASD_CQR_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) * Modify ccw/tcw in cqr so it can be started on a base device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) * Note that this is not enough to restart the cqr!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) * Either reset cqr->startdev as well (summary unit check handling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) * or restart via separate cqr (as in ERP handling).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) struct PFX_eckd_data *pfxdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) struct tcw *tcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) struct tccb *tccb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) struct dcw *dcw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) if (cqr->cpmode == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) tcw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) tccb = tcw_get_tccb(tcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) dcw = (struct dcw *)&tccb->tca[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) pfxdata->validity.verify_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) pfxdata->validity.hyper_pav = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) pfxdata = cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) pfxdata->validity.verify_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) pfxdata->validity.hyper_pav = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) }
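/*
 * Minimal usage sketch (illustration only, not a call site in this
 * driver): a caller that wants to requeue the very same cqr - as in
 * summary unit check handling - must also point it back at the base
 * device after stripping the alias hints:
 *
 *	dasd_eckd_reset_ccw_to_base_io(cqr);
 *	cqr->startdev = base_device;	// 'base_device' is a placeholder name
 *
 * ERP, by contrast, restarts via a separate cqr instead of reusing this
 * one, as noted in the comment above.
 */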
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) #define DASD_ECKD_CHANQ_MAX_SIZE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) struct dasd_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) struct dasd_device *startdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) startdev = dasd_alias_get_start_dev(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) if (!startdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) startdev = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) private = startdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) private->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) if ((base->features & DASD_FEATURE_USERAW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) cqr = dasd_eckd_build_cp_raw(startdev, block, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) cqr = dasd_eckd_build_cp(startdev, block, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) if (IS_ERR(cqr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) private->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) return cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) struct request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) private = cqr->memdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) private->count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) return dasd_eckd_free_cp(cqr, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) dasd_eckd_fill_info(struct dasd_device * device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) struct dasd_information2_t * info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) info->label_block = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) info->FBA_layout = private->uses_cdl ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) info->characteristics_size = sizeof(private->rdc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) memcpy(info->characteristics, &private->rdc_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) sizeof(private->rdc_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) info->confdata_size = min((unsigned long)private->conf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) sizeof(info->configuration_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) memcpy(info->configuration_data, private->conf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) info->confdata_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) }
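/*
 * Note on the copy above: confdata_size is clamped to the smaller of the
 * device's actual configuration data length and the buffer provided by
 * struct dasd_information2_t, so callers may receive a truncated copy
 * when the device reports more configuration data than the structure
 * can hold.
 */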
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) * SECTION: ioctl functions for eckd devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) * Release device ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) * Builds a channel program to release a device that was previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) * reserved (see dasd_eckd_reserve).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) dasd_eckd_release(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) int useglobal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) useglobal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) mutex_lock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) useglobal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) cqr = &dasd_reserve_req->cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) memset(&dasd_reserve_req->ccw, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) sizeof(dasd_reserve_req->ccw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) cqr->cpaddr = &dasd_reserve_req->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) cqr->data = &dasd_reserve_req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) cqr->magic = DASD_ECKD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) ccw->count = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) ccw->cda = (__u32)(addr_t) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) cqr->retries = 2; /* set retry counter to enable basic ERP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) cqr->expires = 2 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) if (useglobal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) mutex_unlock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) * Reserve device ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) * Options are set to 'synchronous wait for interrupt' and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) * 'timeout the request'. This leads to terminating the I/O if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) * the interrupt is outstanding for a certain time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) dasd_eckd_reserve(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) int useglobal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) useglobal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) mutex_lock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) useglobal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) cqr = &dasd_reserve_req->cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) memset(&dasd_reserve_req->ccw, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) sizeof(dasd_reserve_req->ccw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) cqr->cpaddr = &dasd_reserve_req->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) cqr->data = &dasd_reserve_req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) cqr->magic = DASD_ECKD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) ccw->count = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) ccw->cda = (__u32)(addr_t) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) cqr->retries = 2; /* set retry counter to enable basic ERP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) cqr->expires = 2 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) if (useglobal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) mutex_unlock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) * Steal lock ioctl - unconditional reserve device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) * Builds a channel program to break a device's reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) * (unconditional reserve).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) dasd_eckd_steal_lock(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) int useglobal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) useglobal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) mutex_lock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) useglobal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) cqr = &dasd_reserve_req->cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) memset(&dasd_reserve_req->ccw, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) sizeof(dasd_reserve_req->ccw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) cqr->cpaddr = &dasd_reserve_req->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) cqr->data = &dasd_reserve_req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) cqr->magic = DASD_ECKD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) ccw->cmd_code = DASD_ECKD_CCW_SLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) ccw->count = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) ccw->cda = (__u32)(addr_t) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) cqr->retries = 2; /* set retry counter to enable basic ERP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) cqr->expires = 2 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) if (useglobal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) mutex_unlock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) }
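/*
 * Illustrative userspace sketch for the three reservation ioctls above
 * (not driver code; the device node and error handling are assumptions
 * made for the example). All three take no argument and require
 * CAP_SYS_ADMIN:
 *
 *	int fd = open("/dev/dasda", O_RDONLY);	// hypothetical device node
 *
 *	if (ioctl(fd, BIODASDRSRV) == 0) {	// dasd_eckd_reserve()
 *		// ... exclusive access to the volume ...
 *		ioctl(fd, BIODASDRLSE);		// dasd_eckd_release()
 *	}
 *	// BIODASDSLCK (dasd_eckd_steal_lock) breaks an existing
 *	// reservation held elsewhere - use only for recovery.
 */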
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) * SNID - Sense Path Group ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) * This ioctl may be used in situations where I/O is stalled due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) * a reserve, so if the normal dasd_smalloc_request fails, we use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) * preallocated dasd_reserve_req.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) static int dasd_eckd_snid(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) int useglobal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) struct dasd_snid_ioctl_data usrparm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) useglobal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) sizeof(struct dasd_snid_data), device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) mutex_lock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) useglobal = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) cqr = &dasd_reserve_req->cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) memset(cqr, 0, sizeof(*cqr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) memset(&dasd_reserve_req->ccw, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) sizeof(dasd_reserve_req->ccw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) cqr->cpaddr = &dasd_reserve_req->ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) cqr->data = &dasd_reserve_req->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) cqr->magic = DASD_ECKD_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) ccw->cmd_code = DASD_ECKD_CCW_SNID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) ccw->count = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) ccw->cda = (__u32)(addr_t) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) cqr->retries = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) cqr->expires = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) cqr->lpm = usrparm.path_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) /* verify that I/O processing didn't modify the path mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) usrparm.data = *((struct dasd_snid_data *)cqr->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) if (useglobal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) mutex_unlock(&dasd_reserve_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) }
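/*
 * Illustrative sketch (not driver code) of driving the SNID ioctl above
 * from userspace. The field names come from struct dasd_snid_ioctl_data
 * as used in this function; the path mask value is an example only:
 *
 *	struct dasd_snid_ioctl_data parm = { .path_mask = 0x80 };
 *
 *	if (ioctl(fd, BIODASDSNID, &parm) == 0)
 *		// parm.data now holds the sensed path group ID data
 *
 * With a zero path_mask no verification is done; for a non-zero mask the
 * ioctl returns -EIO if I/O processing modified it, as checked above.
 */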
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) * Read performance statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) dasd_eckd_performance(struct dasd_device *device, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) struct dasd_psf_prssd_data *prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) struct dasd_rssd_perf_stats_t *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) (sizeof(struct dasd_psf_prssd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) sizeof(struct dasd_rssd_perf_stats_t)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) "Could not allocate initialization request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) cqr->retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) cqr->expires = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) /* Prepare for Read Subsystem Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) prssdp->order = PSF_ORDER_PRSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) prssdp->suborder = 0x01; /* Performance Statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) ccw->count = sizeof(struct dasd_psf_prssd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) ccw->cda = (__u32)(addr_t) prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) /* Read Subsystem Data - Performance Statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) ccw->cda = (__u32)(addr_t) stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) rc = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) if (copy_to_user(argp, stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) sizeof(struct dasd_rssd_perf_stats_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) }
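/*
 * Illustrative sketch (not driver code): fetching the statistics that
 * the PSF/RSSD pair above reads. The buffer type matches the
 * copy_to_user() in dasd_eckd_performance(); everything else is an
 * assumption for the example:
 *
 *	struct dasd_rssd_perf_stats_t stats;
 *
 *	if (ioctl(fd, BIODASDPSRD, &stats) == 0)
 *		// 'stats' now holds the subsystem performance statistics
 */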
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) * Get attributes (cache operations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) * Returns the cache attributes used in Define Extent (DE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) struct attrib_data_t attrib = private->attrib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) if (copy_to_user(argp, (long *) &attrib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) sizeof(struct attrib_data_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) * Set attributes (cache operations)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) * Stores the attributes for cache operations to be used in Define Extent (DE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) struct attrib_data_t attrib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) private->attrib = attrib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) dev_info(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) "The DASD cache mode was set to %x (%i cylinder prestage)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) private->attrib.operation, private->attrib.nr_cyl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) }
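/*
 * Illustrative sketch (not driver code) for the two cache-attribute
 * ioctls above. The field names 'operation' and 'nr_cyl' are the ones
 * reported by dasd_eckd_set_attrib(); the cache-mode value below is a
 * placeholder for the example:
 *
 *	struct attrib_data_t attrib;
 *
 *	ioctl(fd, BIODASDGATTR, &attrib);	// read current attributes
 *	attrib.operation = 3;			// placeholder cache mode
 *	attrib.nr_cyl = 2;			// prestage two cylinders
 *	ioctl(fd, BIODASDSATTR, &attrib);	// used in future Define Extent
 */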
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) * Issue syscall I/O to EMC Symmetrix array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) * CCWs are PSF and RSSD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) static int dasd_symm_io(struct dasd_device *device, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) struct dasd_symmio_parms usrparm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) char *psf_data, *rssd_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) char psf0, psf1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) psf0 = psf1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) /* Copy parms from caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) if (is_compat_task()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) /* Make sure pointers are sane even on 31 bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) if ((usrparm.psf_data >> 32) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) if ((usrparm.rssd_result >> 32) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) usrparm.psf_data &= 0x7fffffffULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) usrparm.rssd_result &= 0x7fffffffULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) /* at least 2 bytes are accessed and should be allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) if (usrparm.psf_data_len < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) "Symmetrix ioctl invalid data length %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) usrparm.psf_data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) /* alloc I/O data area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) if (!psf_data || !rssd_result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) /* get syscall header from user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) if (copy_from_user(psf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) (void __user *)(unsigned long) usrparm.psf_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) usrparm.psf_data_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) psf0 = psf_data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) psf1 = psf_data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) /* setup CCWs for PSF + RSSD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) "Could not allocate initialization request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) rc = PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) cqr->retries = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) cqr->expires = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) /* Build the ccws */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) /* PSF ccw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) ccw->count = usrparm.psf_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) ccw->cda = (__u32)(addr_t) psf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) /* RSSD ccw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) ccw->count = usrparm.rssd_result_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 	ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) ccw->cda = (__u32)(addr_t) rssd_result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) rc = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) goto out_sfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) rssd_result, usrparm.rssd_result_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) goto out_sfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) out_sfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) kfree(rssd_result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) kfree(psf_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) (int) psf0, (int) psf1, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) }
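/*
 * Illustrative sketch (not driver code) of the EMC Symmetrix pass-through
 * above. The field names come from struct dasd_symmio_parms as used in
 * dasd_symm_io(); the buffers and their contents are placeholders, since
 * the actual PSF order bytes are defined by the storage array:
 *
 *	struct dasd_symmio_parms parms = {
 *		.psf_data	 = (unsigned long)psf_buf,	// >= 2 bytes
 *		.psf_data_len	 = sizeof(psf_buf),
 *		.rssd_result	 = (unsigned long)result_buf,	// filled by RSSD
 *		.rssd_result_len = sizeof(result_buf),
 *	};
 *
 *	ioctl(fd, BIODASDSYMMIO, &parms);	// CAP_SYS_ADMIN or CAP_SYS_RAWIO
 */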
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) struct dasd_device *device = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) case BIODASDGATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) return dasd_eckd_get_attrib(device, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) case BIODASDSATTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) return dasd_eckd_set_attrib(device, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) case BIODASDPSRD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) return dasd_eckd_performance(device, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) case BIODASDRLSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) return dasd_eckd_release(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) case BIODASDRSRV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) return dasd_eckd_reserve(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) case BIODASDSLCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) return dasd_eckd_steal_lock(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) case BIODASDSNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) return dasd_eckd_snid(device, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) case BIODASDSYMMIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) return dasd_symm_io(device, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) * Dump the range of CCWs into 'page' buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) * and return the number of printed characters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) int len, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) char *datap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) while (from <= to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) " CCW %p: %08X %08X DAT:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) from, ((int *) from)[0], ((int *) from)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) /* get pointer to data (consider IDALs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) if (from->flags & CCW_FLAG_IDA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) datap = (char *) *((addr_t *) (addr_t) from->cda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) datap = (char *) ((addr_t) from->cda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) /* dump data (max 32 bytes) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) for (count = 0; count < from->count && count < 32; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) if (count % 8 == 0) len += sprintf(page + len, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) if (count % 4 == 0) len += sprintf(page + len, " ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) len += sprintf(page + len, "%02x", datap[count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) len += sprintf(page + len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) from++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) }
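/*
 * The lines produced above look roughly like this (illustrative only,
 * addresses and data bytes are invented):
 *
 *	dasd(eckd): CCW 000000007a3c0000: 63400010 7A3C2000 DAT:  04000000 00000000  00003039 00000000
 *
 * i.e. the CCW address, its two raw words, and at most 32 bytes of the
 * referenced data area, resolved through the IDAL when CCW_FLAG_IDA is
 * set.
 */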
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) char *reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) u64 *sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) u64 *stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) sense = (u64 *) dasd_get_sense(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) stat = (u64 *) &irb->scsw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) if (sense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) "%016llx %016llx %016llx %016llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) reason, *stat, *((u32 *) (stat + 1)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) sense[0], sense[1], sense[2], sense[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) reason, *stat, *((u32 *) (stat + 1)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) "NO VALID SENSE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) * Print sense data and related channel program.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) * Parts are printed separately because the printk buffer is only 1024 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) struct dasd_ccw_req *req, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) char *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) struct ccw1 *first, *last, *fail, *from, *to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) int len, sl, sct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) page = (char *) get_zeroed_page(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) if (page == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) "No memory to dump sense data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) /* dump the sense data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) len = sprintf(page, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) " I/O status report for device %s:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) dev_name(&device->cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) "CS:%02X RC:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) req ? req->intrc : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) " device %s: Failing CCW: %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) dev_name(&device->cdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) (void *) (addr_t) irb->scsw.cmd.cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) if (irb->esw.esw0.erw.cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) for (sl = 0; sl < 4; sl++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) " Sense(hex) %2d-%2d:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) (8 * sl), ((8 * sl) + 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) for (sct = 0; sct < 8; sct++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) len += sprintf(page + len, " %02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) irb->ecw[8 * sl + sct]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) len += sprintf(page + len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) if (irb->ecw[27] & DASD_SENSE_BIT_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) /* 24 Byte Sense Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) " 24 Byte: %x MSG %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) "%s MSGb to SYSOP\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) irb->ecw[1] & 0x10 ? "" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) /* 32 Byte Sense Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) " 32 Byte: Format: %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) "Exception class %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) " SORRY - NO VALID SENSE AVAILABLE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) printk(KERN_ERR "%s", page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) if (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) /* req == NULL for unsolicited interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) /* dump the Channel Program (max 140 Bytes per line) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) /* Count CCWs and print the first CCWs (maximum 1024 / 140 = 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) first = req->cpaddr;
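^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) /* walk the chain until a CCW without command or data chaining, i.e. the last CCW */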
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) to = min(first + 6, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) len = sprintf(page, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) " Related CP in req: %p\n", req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) dasd_eckd_dump_ccw_range(first, to, page + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) printk(KERN_ERR "%s", page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) /* print failing CCW area (maximum 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) /* scsw->cda is either valid or zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) from = ++to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) fail = (struct ccw1 *)(addr_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) irb->scsw.cmd.cpa; /* failing CCW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) if (from < fail - 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) from = fail - 2; /* there is a gap - print header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) len += sprintf(page, PRINTK_HEADER "......\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) to = min(fail + 1, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) len += dasd_eckd_dump_ccw_range(from, to, page + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) /* print last CCWs (maximum 2) */
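^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) /* max() guards against a failing CCW address that points before the area already dumped (e.g. a zero cda) */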
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) from = max(from, ++to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) if (from < last - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) from = last - 1; /* there is a gap - print header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) len += sprintf(page + len, PRINTK_HEADER "......\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) len += dasd_eckd_dump_ccw_range(from, last, page + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) if (len > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) printk(KERN_ERR "%s", page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) free_page((unsigned long) page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) * Print sense data from a tcw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) struct dasd_ccw_req *req, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) char *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) int len, sl, sct, residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) struct tsb *tsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) u8 *sense, *rcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) page = (char *) get_zeroed_page(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) if (page == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) DBF_DEV_EVENT(DBF_WARNING, device, " %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) "No memory to dump sense data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) /* dump the sense data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) len = sprintf(page, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) " I/O status report for device %s:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) dev_name(&device->cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) irb->scsw.tm.fcxs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) req ? req->intrc : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) " device %s: Failing TCW: %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) dev_name(&device->cdev->dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) (void *) (addr_t) irb->scsw.tm.tcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) tsb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) sense = NULL;
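^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) /* the TSB is fetched only when a TCW address is present and the FCX status check (fcxs & 0x01) passes */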
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) tsb = tcw_get_tsb(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) if (tsb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) " tsb->length %d\n", tsb->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) " tsb->flags %x\n", tsb->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) " tsb->dcw_offset %d\n", tsb->dcw_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) " tsb->count %d\n", tsb->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) residual = tsb->count - 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) " residual %d\n", residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602)
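^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) /* the low three bits of tsb->flags select the transport-status-area format */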
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) switch (tsb->flags & 0x07) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) case 1: /* tsa_iostat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) " tsb->tsa.iostat.dev_time %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) tsb->tsa.iostat.dev_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) " tsb->tsa.iostat.def_time %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) tsb->tsa.iostat.def_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) " tsb->tsa.iostat.queue_time %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) tsb->tsa.iostat.queue_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) " tsb->tsa.iostat.dev_busy_time %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) tsb->tsa.iostat.dev_busy_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) " tsb->tsa.iostat.dev_act_time %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) tsb->tsa.iostat.dev_act_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) sense = tsb->tsa.iostat.sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) case 2: /* ts_ddpc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) for (sl = 0; sl < 2; sl++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) " tsb->tsa.ddpc.rcq %2d-%2d: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) (8 * sl), ((8 * sl) + 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) rcq = tsb->tsa.ddpc.rcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) for (sct = 0; sct < 8; sct++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) len += sprintf(page + len, " %02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) rcq[8 * sl + sct]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) len += sprintf(page + len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) sense = tsb->tsa.ddpc.sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) case 3: /* tsa_intrg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) " tsb->tsa.intrg.: not supported yet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) if (sense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) for (sl = 0; sl < 4; sl++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) len += sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) " Sense(hex) %2d-%2d:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) (8 * sl), ((8 * sl) + 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) for (sct = 0; sct < 8; sct++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) len += sprintf(page + len, " %02x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) sense[8 * sl + sct]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) len += sprintf(page + len, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) if (sense[27] & DASD_SENSE_BIT_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) /* 24 Byte Sense Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) " 24 Byte: %x MSG %x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) "%s MSGb to SYSOP\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) sense[7] >> 4, sense[7] & 0x0f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) sense[1] & 0x10 ? "" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) /* 32 Byte Sense Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) " 32 Byte: Format: %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) "Exception class %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) sense[6] & 0x0f, sense[22] >> 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) " SORRY - NO VALID SENSE AVAILABLE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) sprintf(page + len, PRINTK_HEADER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) " SORRY - NO TSB DATA AVAILABLE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) printk(KERN_ERR "%s", page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) free_page((unsigned long) page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) static void dasd_eckd_dump_sense(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) struct dasd_ccw_req *req, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) u8 *sense = dasd_get_sense(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) if (scsw_is_tm(&irb->scsw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) * In some cases the 'File Protected' or 'Incorrect Length'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) * error might be expected and log messages shouldn't be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) * then. Check if the according suppress bit is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) if (scsw_cstat(&irb->scsw) == 0x40 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) dasd_eckd_dump_sense_tcw(device, req, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) * In some cases the 'Command Reject' or 'No Record Found'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) * error might be expected and log messages shouldn't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) * written then. Check if the according suppress bit is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) if (sense && sense[0] & SNS0_CMD_REJECT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) if (sense && sense[1] & SNS1_NO_REC_FOUND &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) dasd_eckd_dump_sense_ccw(device, req, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) static int dasd_eckd_pm_freeze(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) * the device should be disconnected from our LCU structure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) * on restore we will reconnect it and reread LCU-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) * information like PAV support that might have changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) dasd_alias_remove_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) dasd_alias_disconnect_device_from_lcu(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) static int dasd_eckd_restore_device(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) struct dasd_eckd_characteristics temp_rdc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) struct dasd_uid temp_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) unsigned long cqr_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) /* Read Configuration Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) rc = dasd_eckd_read_conf(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) "Read configuration data failed, rc=%d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) dasd_eckd_get_uid(device, &temp_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) /* Generate device unique id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) rc = dasd_eckd_generate_uid(device);
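^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) /* compare with the UID read above to detect a device that changed while suspended */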
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) dev_err(&device->cdev->dev, "The UID of the DASD has "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) "changed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) /* register lcu with alias handling, enable PAV if this is a new lcu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) rc = dasd_alias_make_device_known_to_lcu(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) dasd_eckd_validate_server(device, cqr_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) /* RE-Read Configuration Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) rc = dasd_eckd_read_conf(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) "Read configuration data failed, rc=%d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) goto out_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) /* Read Feature Codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) dasd_eckd_read_features(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) /* Read Volume Information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) dasd_eckd_read_vol_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) /* Read Extent Pool Information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) dasd_eckd_read_ext_pool_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) /* Read Device Characteristics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) &temp_rdc_data, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) "Read device characteristic failed, rc=%d", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) goto out_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) /* add device to alias management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) dasd_alias_add_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) out_err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) dasd_alias_disconnect_device_from_lcu(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) static int dasd_eckd_reload_device(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) int rc, old_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) char print_uid[60];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) struct dasd_uid uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) * remove device from alias handling to prevent new requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) * from being scheduled on the wrong alias device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) dasd_alias_remove_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) old_base = private->uid.base_unit_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) /* Read Configuration Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) rc = dasd_eckd_read_conf(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) rc = dasd_eckd_generate_uid(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) * update unit address configuration and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) * add device to alias management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) dasd_alias_update_add_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) dasd_eckd_get_uid(device, &uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) if (old_base != uid.base_unit_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) if (strlen(uid.vduit) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) snprintf(print_uid, sizeof(print_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) uid.ssid, uid.base_unit_addr, uid.vduit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) snprintf(print_uid, sizeof(print_uid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) "%s.%s.%04x.%02x", uid.vendor, uid.serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) uid.ssid, uid.base_unit_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) dev_info(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) "An Alias device was reassigned to a new base device "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) "with UID: %s\n", print_uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) static int dasd_eckd_read_message_buffer(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) struct dasd_rssd_messages *messages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) __u8 lpum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) struct dasd_rssd_messages *message_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) struct dasd_psf_prssd_data *prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) (sizeof(struct dasd_psf_prssd_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) sizeof(struct dasd_rssd_messages)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) "Could not allocate read message buffer request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) cqr->lpm = lpum;
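^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) /* (re)initialize the request below the retry label; on a retry only the path mask differs */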
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) cqr->expires = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) /* dasd_sleep_on_immediatly does not do complex error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) * recovery, so clear the ERP flag and set the retry counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) * to do basic ERP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) /* Prepare for Read Subsystem Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) prssdp->order = PSF_ORDER_PRSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) prssdp->suborder = 0x03; /* Message Buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) /* all other bytes of prssdp must be zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) ccw->count = sizeof(struct dasd_psf_prssd_data);
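^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) /* command-chain (CCW_FLAG_CC) to the following RSSD CCW that reads the data */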
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) ccw->cda = (__u32)(addr_t) prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) /* Read Subsystem Data - message buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) ccw->count = sizeof(struct dasd_rssd_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) ccw->cda = (__u32)(addr_t) message_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) message_buf = (struct dasd_rssd_messages *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) (prssdp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) memcpy(messages, message_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) sizeof(struct dasd_rssd_messages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) } else if (cqr->lpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) * on z/VM we might not be able to do I/O on the requested path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) * but we can get the required information on any path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) * so retry with an open path mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) cqr->lpm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) "Reading messages failed with rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) static int dasd_eckd_query_host_access(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) struct dasd_psf_query_host_access *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) struct dasd_psf_query_host_access *host_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) struct dasd_psf_prssd_data *prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) /* not available for HYPER PAV alias devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) if (!device->block && private->lcu->pav == HYPER_PAV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) /* may not be supported by the storage server */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) if (!(private->features.feature[14] & 0x80))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) sizeof(struct dasd_psf_prssd_data) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) "Could not allocate read message buffer request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) if (!host_access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) dasd_sfree_request(cqr, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) "Could not allocate host_access buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) cqr->expires = 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) /* Prepare for Read Subsystem Data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) prssdp = (struct dasd_psf_prssd_data *) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) prssdp->order = PSF_ORDER_PRSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) /* LSS and Volume that will be queried */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) prssdp->lss = private->ned->ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) prssdp->volume = private->ned->unit_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) /* all other bytes of prssdp must be zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) ccw->count = sizeof(struct dasd_psf_prssd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) ccw->flags |= CCW_FLAG_CC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) ccw->cda = (__u32)(addr_t) prssdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) /* Read Subsystem Data - query host access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) ccw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) ccw->cmd_code = DASD_ECKD_CCW_RSSD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) ccw->count = sizeof(struct dasd_psf_query_host_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) ccw->flags |= CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) ccw->cda = (__u32)(addr_t) host_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) /* the command might not be supported, suppress error message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) rc = dasd_sleep_on_interruptible(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) *data = *host_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) "Reading host access data failed with rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) kfree(host_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) * return number of grouped devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) static int dasd_eckd_host_access_count(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) struct dasd_psf_query_host_access *access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) struct dasd_ckd_path_group_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) struct dasd_ckd_host_information *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) access = kzalloc(sizeof(*access), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) if (!access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) "Could not allocate access buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) rc = dasd_eckd_query_host_access(device, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) kfree(access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) info = (struct dasd_ckd_host_information *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) access->host_access_information;
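^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) /* path group entries are info->entry_size bytes apart, so compute each address manually */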
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) for (i = 0; i < info->entry_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) entry = (struct dasd_ckd_path_group_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) (info->entry + i * info->entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) if (entry->status_flags & DASD_ECKD_PG_GROUPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) kfree(access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) * write host access information to a sequential file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) struct dasd_psf_query_host_access *access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) struct dasd_ckd_path_group_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) struct dasd_ckd_host_information *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) char sysplex[9] = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) access = kzalloc(sizeof(*access), GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) if (!access) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) "Could not allocate access buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) rc = dasd_eckd_query_host_access(device, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) kfree(access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) info = (struct dasd_ckd_host_information *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) access->host_access_information;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) for (i = 0; i < info->entry_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) entry = (struct dasd_ckd_path_group_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) (info->entry + i * info->entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) /* PGID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) /* FLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) seq_printf(m, "status_flags %02x\n", entry->status_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) /* SYSPLEX NAME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) EBCASC(sysplex, sizeof(sysplex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) seq_printf(m, "sysplex_name %8s\n", sysplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) /* SUPPORTED CYLINDER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) /* TIMESTAMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) seq_printf(m, "timestamp %lu\n", (unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) entry->timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) kfree(access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) * Perform Subsystem Function - CUIR response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) __u32 message_id, __u8 lpum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) struct dasd_psf_cuir_response *psf_cuir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) int pos = pathmask_to_pos(lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) sizeof(struct dasd_psf_cuir_response),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) device, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) if (IS_ERR(cqr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) "Could not allocate PSF-CUIR request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) return PTR_ERR(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) psf_cuir->cc = response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) psf_cuir->chpid = device->path[pos].chpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) psf_cuir->message_id = message_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) psf_cuir->cssid = device->path[pos].cssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) psf_cuir->ssid = device->path[pos].ssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) ccw->cmd_code = DASD_ECKD_CCW_PSF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) ccw->cda = (__u32)(addr_t)psf_cuir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) ccw->count = sizeof(struct dasd_psf_cuir_response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) cqr->retries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) cqr->expires = 10*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) rc = dasd_sleep_on(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) dasd_sfree_request(cqr, cqr->memdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) * return the configuration data that is referenced by the record selector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) * if one is specified; otherwise return the conf_data pointer for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) * path specified by lpum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) __u8 lpum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) struct dasd_cuir_message *cuir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) struct dasd_conf_data *conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) int path, pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) if (cuir->record_selector == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) conf_data = device->path[pos].conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) if (conf_data->gneq.record_selector ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) cuir->record_selector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) return conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) return device->path[pathmask_to_pos(lpum)].conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) * This function determines the scope of a reconfiguration request by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) * analysing the path and device selection data provided in the CUIR request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) * Returns a path mask containing the CUIR-affected paths for the given device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) * If the CUIR request does not contain the required information, return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) * path mask of the path the attention message for the CUIR request was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) * received on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) struct dasd_cuir_message *cuir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) struct dasd_conf_data *ref_conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) unsigned long bitmask = 0, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) struct dasd_conf_data *conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) unsigned int pos, path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) char *ref_gneq, *gneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) char *ref_ned, *ned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) int tbcpm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) /* if the CUIR request does not specify the scope, use the path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) the attention message was presented on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) if (!cuir->ned_map ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) return lpum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) /* get reference conf data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) /* reference ned is determined by ned_map field */
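^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) /* 8 - ffs() converts the single-bit mask to an array index (bit 0x80 -> neds[0]) */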
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) pos = 8 - ffs(cuir->ned_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) ref_ned = (char *)&ref_conf_data->neds[pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) ref_gneq = (char *)&ref_conf_data->gneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) /* transfer 24 bit neq_map to mask */
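^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) /* neq_map[0] is the most significant byte of the 24-bit mask */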
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) mask = cuir->neq_map[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) mask |= cuir->neq_map[1] << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) mask |= cuir->neq_map[0] << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) for (path = 0; path < 8; path++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) /* initialise data per path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) bitmask = mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) conf_data = device->path[path].conf_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) pos = 8 - ffs(cuir->ned_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) ned = (char *) &conf_data->neds[pos];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) /* compare reference ned and per path ned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) gneq = (char *)&conf_data->gneq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) /* compare reference gneq and per-path gneq under the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) 24 bit mask, where mask bit 0 corresponds to byte 31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) of the gneq and mask bit 23 to byte 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) while (bitmask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) pos = ffs(bitmask) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) clear_bit(pos, &bitmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) if (bitmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) /* device and path match the reference values,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) add the path to the CUIR scope (path 0 is mask bit 0x80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) tbcpm |= 0x80 >> path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) return tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) unsigned long paths, int action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) int pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) while (paths) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) /* get position of bit in mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) pos = 8 - ffs(paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) /* get channel path descriptor from this position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) if (action == CUIR_QUIESCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) device->path[pos].cssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) device->path[pos].chpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) else if (action == CUIR_RESUME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) pr_info("Path %x.%02x is back online after service on the storage server\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) device->path[pos].cssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) device->path[pos].chpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) clear_bit(7 - pos, &paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) struct dasd_cuir_message *cuir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) unsigned long tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) /* nothing to do if path is not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) if (!(dasd_path_get_opm(device) & tbcpm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken,
		   return an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) /* remove device from operational path mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) dasd_path_remove_opm(device, tbcpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) dasd_path_add_cuirpm(device, tbcpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) return tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289)
/*
 * Walk through all devices and build a path mask to quiesce them.
 * Return an error if the last path to a device would be removed.
 *
 * If only some of the devices are quiesced and an error occurs,
 * no onlining is necessary; the storage server will notify the
 * devices that were already set offline again.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) struct dasd_cuir_message *cuir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) struct alias_pav_group *pavgroup, *tempgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) struct dasd_device *dev, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) unsigned long paths = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) int tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) /* active devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) if (tbcpm < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) /* inactive devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) if (tbcpm < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) /* devices in PAV groups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) list_for_each_entry_safe(pavgroup, tempgroup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) &private->lcu->grouplist, group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) list_for_each_entry_safe(dev, n, &pavgroup->baselist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) if (tbcpm < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) get_ccwdev_lock(dev->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) if (tbcpm < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) /* notify user about all paths affected by CUIR action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) return tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358)
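/*
 * Walk through all devices of the LCU and trigger path verification for
 * every path in the CUIR scope that is not already operational.
 */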
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) struct dasd_cuir_message *cuir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) struct alias_pav_group *pavgroup, *tempgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) struct dasd_device *dev, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) unsigned long paths = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) int tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367)
	/*
	 * The path may have been added through a generic path event before;
	 * only trigger path verification if the path is not already in use.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) list_for_each_entry_safe(dev, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) &private->lcu->active_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) if (!(dasd_path_get_opm(dev) & tbcpm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) dasd_path_add_tbvpm(dev, tbcpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) dasd_schedule_device_bh(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) list_for_each_entry_safe(dev, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) &private->lcu->inactive_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) if (!(dasd_path_get_opm(dev) & tbcpm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) dasd_path_add_tbvpm(dev, tbcpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) dasd_schedule_device_bh(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) /* devices in PAV groups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) list_for_each_entry_safe(pavgroup, tempgroup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) &private->lcu->grouplist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) list_for_each_entry_safe(dev, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) &pavgroup->baselist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) if (!(dasd_path_get_opm(dev) & tbcpm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) dasd_path_add_tbvpm(dev, tbcpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) dasd_schedule_device_bh(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) list_for_each_entry_safe(dev, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) &pavgroup->aliaslist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) paths |= tbcpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) if (!(dasd_path_get_opm(dev) & tbcpm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) dasd_path_add_tbvpm(dev, tbcpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) dasd_schedule_device_bh(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) /* notify user about all paths affected by CUIR action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421)
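/*
 * Handle a CUIR message: quiesce or resume the affected paths and send
 * the matching PSF response to the storage server.
 */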
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) __u8 lpum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) struct dasd_cuir_message *cuir = messages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) int response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) "CUIR request: %016llx %016llx %016llx %08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) ((u32 *)cuir)[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) if (cuir->code == CUIR_QUIESCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) /* quiesce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) response = PSF_CUIR_LAST_PATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) response = PSF_CUIR_COMPLETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) } else if (cuir->code == CUIR_RESUME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) /* resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) dasd_eckd_cuir_resume(device, lpum, cuir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) response = PSF_CUIR_COMPLETED;
	} else {
		response = PSF_CUIR_NOT_SUPPORTED;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) dasd_eckd_psf_cuir_response(device, response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) cuir->message_id, lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) DBF_DEV_EVENT(DBF_WARNING, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) "CUIR response: %d on message ID %08x", response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) cuir->message_id);
	/* to make sure there is no attention left, schedule the work again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) device->discipline->check_attention(device, lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454)
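/*
 * Resume I/O on all devices of the LCU that were stopped because the
 * extent pool ran out of space.
 */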
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) static void dasd_eckd_oos_resume(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) struct alias_pav_group *pavgroup, *tempgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) struct dasd_device *dev, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) spin_lock_irqsave(&private->lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) if (dev->stopped & DASD_STOPPED_NOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) dasd_generic_space_avail(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) if (dev->stopped & DASD_STOPPED_NOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) dasd_generic_space_avail(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) /* devices in PAV groups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) list_for_each_entry_safe(pavgroup, tempgroup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) &private->lcu->grouplist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) list_for_each_entry_safe(dev, n, &pavgroup->baselist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) if (dev->stopped & DASD_STOPPED_NOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) dasd_generic_space_avail(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) if (dev->stopped & DASD_STOPPED_NOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) dasd_generic_space_avail(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) spin_unlock_irqrestore(&private->lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490)
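/*
 * Handle an out-of-space attention message: report the extent pool
 * state to the user and refresh the extent pool information.
 */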
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) __u8 lpum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) struct dasd_oos_message *oos = messages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) switch (oos->code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) case REPO_WARN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) case POOL_WARN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) "Extent pool usage has reached a critical value\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) dasd_eckd_oos_resume(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) case REPO_EXHAUST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) case POOL_EXHAUST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) dev_warn(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) "Extent pool is exhausted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) case REPO_RELIEVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) case POOL_RELIEVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) dev_info(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) "Extent pool physical space constraint has been relieved\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) /* In any case, update related data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) dasd_eckd_read_ext_pool_info(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517)
	/* to make sure there is no attention left, schedule the work again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) device->discipline->check_attention(device, lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521)
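/*
 * Worker function: read the attention message buffer for the given path
 * and dispatch CUIR and out-of-space messages to their handlers.
 */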
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) static void dasd_eckd_check_attention_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) struct check_attention_work_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) struct dasd_rssd_messages *messages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) data = container_of(work, struct check_attention_work_data, worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) device = data->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) messages = kzalloc(sizeof(*messages), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) if (!messages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) "Could not allocate attention message buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) if (messages->length == ATTENTION_LENGTH_CUIR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) messages->format == ATTENTION_FORMAT_CUIR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) dasd_eckd_handle_cuir(device, messages, data->lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) if (messages->length == ATTENTION_LENGTH_OOS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) messages->format == ATTENTION_FORMAT_OOS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) dasd_eckd_handle_oos(device, messages, data->lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) kfree(messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553)
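/*
 * Schedule a work item that reads and handles the pending attention
 * messages for the given path.
 */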
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) struct check_attention_work_data *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) data = kzalloc(sizeof(*data), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) data->device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) data->lpum = lpum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) schedule_work(&data->worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568)
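/*
 * Remove a path that lost HPF functionality from the operational path
 * mask. Returns 1 if the path was disabled, 0 if it is the last
 * operational path and is therefore kept.
 */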
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) if (~lpum & dasd_path_get_opm(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) dasd_path_add_nohpfpm(device, lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) dasd_path_remove_opm(device, lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) "Channel path %02X lost HPF functionality and is disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) lpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) dev_err(&device->cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) "High Performance FICON disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) private->fcx_max_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) static int dasd_eckd_hpf_enabled(struct dasd_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) return private->fcx_max_data ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597)
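/*
 * Handle an HPF error: disable HPF for the affected path or for the
 * whole device; if the device loses HPF, its requests are requeued.
 */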
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) if (!private->fcx_max_data) {
		/* sanity check: without HPF the error makes no sense */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) "Trying to disable HPF for a non HPF device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) dasd_eckd_disable_hpf_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) dasd_eckd_disable_hpf_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) dasd_path_set_tbvpm(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) dasd_path_get_hpfpm(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) }
	/*
	 * prevent any new I/O from being started on the device and schedule
	 * a requeue of existing requests
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) dasd_schedule_requeue(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) * Initialize block layer request queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) unsigned int logical_block_size = block->bp_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) struct request_queue *q = block->request_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) struct dasd_device *device = block->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) int max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) if (device->features & DASD_FEATURE_USERAW) {
		/*
		 * The max_blocks value for raw_track access is 256.
		 * It is higher than the native ECKD value because we
		 * only need one ccw per track, so max_hw_sectors is
		 * 2048 x 512B = 1024kB = 16 tracks.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) q->limits.max_dev_sectors = max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) blk_queue_logical_block_size(q, logical_block_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) blk_queue_max_hw_sectors(q, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) blk_queue_max_segments(q, USHRT_MAX);
	/* With page-sized segments, each segment can be translated into one idaw/tidaw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) blk_queue_max_segment_size(q, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) blk_queue_segment_boundary(q, PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) static struct ccw_driver dasd_eckd_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) .name = "dasd-eckd",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) .ids = dasd_eckd_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) .probe = dasd_eckd_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) .remove = dasd_generic_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) .set_offline = dasd_generic_set_offline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) .set_online = dasd_eckd_set_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) .notify = dasd_generic_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) .path_event = dasd_generic_path_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) .shutdown = dasd_generic_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) .freeze = dasd_generic_pm_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) .thaw = dasd_generic_restore_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) .restore = dasd_generic_restore_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) .uc_handler = dasd_generic_uc_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) .int_class = IRQIO_DAS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) static struct dasd_discipline dasd_eckd_discipline = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) .name = "ECKD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) .ebcname = "ECKD",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) .check_device = dasd_eckd_check_characteristics,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) .uncheck_device = dasd_eckd_uncheck_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) .do_analysis = dasd_eckd_do_analysis,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) .verify_path = dasd_eckd_verify_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) .basic_to_ready = dasd_eckd_basic_to_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) .online_to_ready = dasd_eckd_online_to_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) .basic_to_known = dasd_eckd_basic_to_known,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) .setup_blk_queue = dasd_eckd_setup_blk_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) .fill_geometry = dasd_eckd_fill_geometry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) .start_IO = dasd_start_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) .term_IO = dasd_term_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) .handle_terminated_request = dasd_eckd_handle_terminated_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) .format_device = dasd_eckd_format_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) .check_device_format = dasd_eckd_check_device_format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) .erp_action = dasd_eckd_erp_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) .erp_postaction = dasd_eckd_erp_postaction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) .check_for_device_change = dasd_eckd_check_for_device_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) .build_cp = dasd_eckd_build_alias_cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) .free_cp = dasd_eckd_free_alias_cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) .dump_sense = dasd_eckd_dump_sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) .dump_sense_dbf = dasd_eckd_dump_sense_dbf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) .fill_info = dasd_eckd_fill_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) .ioctl = dasd_eckd_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) .freeze = dasd_eckd_pm_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) .restore = dasd_eckd_restore_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) .reload = dasd_eckd_reload_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) .get_uid = dasd_eckd_get_uid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) .kick_validate = dasd_eckd_kick_validate_server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) .check_attention = dasd_eckd_check_attention,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) .host_access_count = dasd_eckd_host_access_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) .hosts_print = dasd_hosts_print,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) .handle_hpf_error = dasd_eckd_handle_hpf_error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) .disable_hpf = dasd_eckd_disable_hpf_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) .hpf_enabled = dasd_eckd_hpf_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) .reset_path = dasd_eckd_reset_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) .is_ese = dasd_eckd_is_ese,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) .space_allocated = dasd_eckd_space_allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) .space_configured = dasd_eckd_space_configured,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) .logical_capacity = dasd_eckd_logical_capacity,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) .release_space = dasd_eckd_release_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) .ext_pool_id = dasd_eckd_ext_pool_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) .ext_size = dasd_eckd_ext_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) .ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) .ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) .ext_pool_oos = dasd_eckd_ext_pool_oos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) .ese_format = dasd_eckd_ese_format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) .ese_read = dasd_eckd_ese_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) static int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) dasd_eckd_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) ASCEBC(dasd_eckd_discipline.ebcname, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) if (!dasd_reserve_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) path_verification_worker = kmalloc(sizeof(*path_verification_worker),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) if (!path_verification_worker) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) kfree(dasd_reserve_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) kfree(dasd_vol_info_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) rawpadpage = (void *)__get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) if (!rawpadpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) kfree(path_verification_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) kfree(dasd_reserve_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) kfree(dasd_vol_info_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) ret = ccw_driver_register(&dasd_eckd_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) wait_for_device_probe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) kfree(path_verification_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) kfree(dasd_reserve_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) kfree(dasd_vol_info_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) free_page((unsigned long)rawpadpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) static void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) dasd_eckd_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) ccw_driver_unregister(&dasd_eckd_driver);
	kfree(path_verification_worker);
	kfree(dasd_vol_info_req);
	kfree(dasd_reserve_req);
	free_page((unsigned long)rawpadpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) module_init(dasd_eckd_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) module_exit(dasd_eckd_cleanup);