// SPDX-License-Identifier: GPL-2.0
/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corp. 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"


/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare event, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so any optimization
 *   effort should focus on making this function faster or better; the rest
 *   just has to be correct.
 */


static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};

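/*
 * Look up the alias_server that matches the vendor and serial number of
 * the given uid. Called with aliastree.lock held. Returns NULL if no
 * matching server is in the global server list.
 */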
static struct alias_server *_find_server(struct dasd_uid *uid)
{
	struct alias_server *pos;
	list_for_each_entry(pos, &aliastree.serverlist, server) {
		if (!strncmp(pos->uid.vendor, uid->vendor,
			     sizeof(uid->vendor))
		    && !strncmp(pos->uid.serial, uid->serial,
				sizeof(uid->serial)))
			return pos;
	}
	return NULL;
}

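/*
 * Look up the alias_lcu with the given subsystem id (ssid) on a server.
 * Returns NULL if the lcu is not known yet.
 */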
static struct alias_lcu *_find_lcu(struct alias_server *server,
				   struct dasd_uid *uid)
{
	struct alias_lcu *pos;
	list_for_each_entry(pos, &server->lculist, lcu) {
		if (pos->uid.ssid == uid->ssid)
			return pos;
	}
	return NULL;
}

static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr &&
		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
			return pos;
	}
	return NULL;
}

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}

static void _free_server(struct alias_server *server)
{
	kfree(server);
}

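/*
 * Allocate and initialize a new lcu structure. The unit address
 * configuration buffer (uac) and the reset-summary-unit-check request
 * (rsu_cqr) are allocated here, up front, so they are available later
 * (see read_unit_address_configuration and reset_summary_unit_check).
 */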
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * It returns 0 on success; a negative return code indicates that
 * something went wrong (e.g. -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary, cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	list_del_init(&device->alias_list);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */

static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{

	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;
	struct dasd_uid uid;

	spin_lock(get_ccwdev_lock(device->cdev));
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	spin_unlock(get_ccwdev_lock(device->cdev));
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
};

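/*
 * Move a device back to the inactive list of its lcu. If its PAV group
 * becomes empty the group is deleted; otherwise make sure the group's
 * round-robin pointer no longer references the removed device.
 * The caller must hold the lcu lock.
 */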
static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;

	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	if (group->next == device)
		group->next = NULL;
};

static int
suborder_not_supported(struct dasd_ccw_req *cqr)
{
	char *sense;
	char reason;
	char msg_format;
	char msg_no;

	/*
	 * intrc values ENODEV, ENOLINK and EPERM
	 * will be obtained from sleep_on to indicate that no
	 * I/O operation can be started
	 */
	if (cqr->intrc == -ENODEV)
		return 1;

	if (cqr->intrc == -ENOLINK)
		return 1;

	if (cqr->intrc == -EPERM)
		return 1;

	sense = dasd_get_sense(&cqr->irb);
	if (!sense)
		return 0;

	reason = sense[0];
	msg_format = (sense[7] & 0xF0);
	msg_no = (sense[7] & 0x0F);

	/* command reject, Format 0 MSG 4 - invalid parameter */
	if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
		return 1;

	return 0;
}

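/*
 * Build and start a PSF/RSSD channel program that reads the unit address
 * configuration (suborder 0x0e) of the lcu into lcu->uac. The
 * NEED_UAC_UPDATE flag is cleared before the I/O is started, so a summary
 * unit check that arrives in the meantime can be detected by the flag
 * being set again.
 */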
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		goto out;

	if (suborder_not_supported(cqr)) {
		/* suborder not supported or device unusable for IO */
		rc = -EOPNOTSUPP;
	} else {
		/* IO failed but should be retried */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
out:
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

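/*
 * Re-read the unit address configuration of an lcu and rebuild its PAV
 * groups: all existing groups are dissolved first, then the uac data is
 * read via refdev, the PAV mode (NO_PAV, BASE_PAV or HYPER_PAV) is derived
 * from it, and finally the devices on the active_devices list are sorted
 * back into their groups.
 */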
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * If another update is needed, skip the remaining handling: the data
	 * might already be outdated. In particular, do not add the device to
	 * an LCU with a pending update.
	 */
	if (lcu->flags & NEED_UAC_UPDATE)
		goto out;
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
out:
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check the flags again, as there could have been another
	 * prepare_update or a new device while we were still processing
	 * the data.
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			      " alias data in lcu (rc = %d), retry later", rc);
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

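/*
 * Schedule the delayed worker that updates the unit address configuration
 * of an lcu. A suitable device is chosen to run the update: preferably the
 * given device, otherwise some base or alias device of the first PAV group,
 * otherwise any device from the active_devices list. Called with the lcu
 * lock held.
 */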
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * If we haven't found a proper device yet, give up for now; the
	 * next device that is set active will trigger an lcu update.
	 */
	if (!usedev)
		return -EINVAL;
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}

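/*
 * Mark a device as 'ready for service': sort it into its PAV group based
 * on the cached unit address configuration, or, if the cached data looks
 * outdated (uid type mismatch or an update already pending), park it on
 * the active_devices list and schedule an lcu update instead.
 */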
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	__u8 uaddr = private->uid.real_unit_addr;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Check if device and lcu type differ. If so, the uac data may be
	 * outdated and needs to be updated.
	 */
	if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
		lcu->flags |= UPDATE_PENDING;
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "uid type mismatch - trigger rescan");
	}
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		private->pavgroup = NULL;
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}

int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}

int dasd_alias_remove_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;

	/* nothing to do if already removed */
	if (!lcu)
		return 0;
	spin_lock_irqsave(&lcu->lock, flags);
	_remove_device_from_lcu(lcu, device);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

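/*
 * Pick an alias device that can start I/O on behalf of the given base
 * device. Aliases are handed out round-robin via group->next, and an alias
 * is only returned if its request count is lower than the base device's,
 * it is not stopped and it is not being set offline.
 */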
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_pav_group *group = private->pavgroup;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	unsigned long flags;

	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV is enabled but the prefix command is not; this is very
		 * unlikely and probably indicates a lost path group, so use
		 * the base device for I/O.
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}

/*
 * Summary unit check handling depends on the way alias devices
 * are handled, so it is done here rather than in dasd_eckd.c
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static int reset_summary_unit_check(struct alias_lcu *lcu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct dasd_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) char reason)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct dasd_ccw_req *cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct ccw1 *ccw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) cqr = lcu->rsu_cqr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) memcpy((char *) &cqr->magic, "ECKD", 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ASCEBC((char *) &cqr->magic, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) ccw = cqr->cpaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ccw->cmd_code = DASD_ECKD_CCW_RSCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ccw->flags = CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ccw->count = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ccw->cda = (__u32)(addr_t) cqr->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ((char *)cqr->data)[0] = reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) cqr->retries = 255; /* set retry counter to enable basic ERP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) cqr->startdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) cqr->memdev = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) cqr->block = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) cqr->expires = 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) cqr->buildclk = get_tod_clock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) cqr->status = DASD_CQR_FILLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) rc = dasd_sleep_on_immediatly(cqr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct alias_pav_group *pavgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* active and inactive list can contain alias as well as base devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) list_for_each_entry(device, &lcu->active_devices, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (private->uid.type != UA_BASE_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (private->uid.type != UA_BASE_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) list_for_each_entry(pavgroup, &lcu->grouplist, group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) list_for_each_entry(device, &pavgroup->baselist, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) dasd_schedule_block_bh(device->block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) dasd_schedule_device_bh(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
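/*
 * Flush the request queues of all alias devices on the lcu and collect
 * the aliases back on the active_devices list; their pavgroup link is
 * cleared in the process.
 */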
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct alias_pav_group *pavgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct dasd_device *device, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) struct dasd_eckd_private *private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) LIST_HEAD(active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * The problem here is that dasd_flush_device_queue may wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * for the termination of a request. We cannot hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * the lcu lock during that time, so we have to assume that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * the lists may have changed in the meantime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * Idea: first gather all active alias devices in a separate list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * then flush the first element of this list unlocked, and afterwards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * check whether it is still on the list before moving it to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * active_devices list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) spin_lock_irqsave(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) list_for_each_entry_safe(device, temp, &lcu->active_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (private->uid.type == UA_BASE_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) list_move(&device->alias_list, &active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) list_for_each_entry(pavgroup, &lcu->grouplist, group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) list_splice_init(&pavgroup->aliaslist, &active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) while (!list_empty(&active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) device = list_first_entry(&active, struct dasd_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) alias_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) spin_unlock_irqrestore(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) dasd_flush_device_queue(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) spin_lock_irqsave(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * only move device around if it wasn't moved away while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * were waiting for the flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (device == list_first_entry(&active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct dasd_device, alias_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) list_move(&device->alias_list, &lcu->active_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) private->pavgroup = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) spin_unlock_irqrestore(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
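/*
 * Set the DASD_STOPPED_SU stop bit on every device of the lcu so that
 * no new I/O is started while the summary unit check is handled.
 * Must be called with the lcu lock held.
 */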
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct alias_pav_group *pavgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) list_for_each_entry(device, &lcu->active_devices, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) list_for_each_entry(pavgroup, &lcu->grouplist, group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) list_for_each_entry(device, &pavgroup->baselist, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
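/*
 * Remove the DASD_STOPPED_SU stop bit from every device of the lcu.
 * Must be called with the lcu lock held.
 */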
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct alias_pav_group *pavgroup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) list_for_each_entry(device, &lcu->active_devices, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) list_for_each_entry(pavgroup, &lcu->grouplist, group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) list_for_each_entry(device, &pavgroup->baselist, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) spin_lock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) spin_unlock(get_ccwdev_lock(device->cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
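/*
 * Worker function for summary unit check handling: flush all alias
 * devices, reset the summary unit check, unstop and restart the
 * devices, and finally trigger an lcu update to read the new alias
 * configuration.
 */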
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static void summary_unit_check_handling_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct alias_lcu *lcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct summary_unit_check_work_data *suc_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct dasd_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) suc_data = container_of(work, struct summary_unit_check_work_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) lcu = container_of(suc_data, struct alias_lcu, suc_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) device = suc_data->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /* 1. flush alias devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) flush_all_alias_devices_on_lcu(lcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* 2. reset summary unit check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) dasd_device_remove_stop_bits(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) reset_summary_unit_check(lcu, device, suc_data->reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) spin_lock_irqsave(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) _unstop_all_devices_on_lcu(lcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) _restart_all_base_devices_on_lcu(lcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) /* 3. read new alias configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) _schedule_lcu_update(lcu, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) lcu->suc_data.device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) spin_unlock_irqrestore(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
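/*
 * Entry point for summary unit check handling, scheduled via the
 * device's suc_work. It stops all devices on the lcu, marks the lcu
 * for an alias update and schedules summary_unit_check_handling_work.
 */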
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) void dasd_alias_handle_summary_unit_check(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct dasd_device *device = container_of(work, struct dasd_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) suc_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct dasd_eckd_private *private = device->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct alias_lcu *lcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) lcu = private->lcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!lcu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) "device not ready to handle summary"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) " unit check (no lcu structure)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_lock_irqsave(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* If this device is about to be removed, just return and wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * the next interrupt on a different device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (list_empty(&device->alias_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) "device is in offline processing,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) " don't do summary unit check handling");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (lcu->suc_data.device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* already scheduled or running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) DBF_DEV_EVENT(DBF_WARNING, device, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) "previous instance of summary unit check worker"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) " still pending");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) _stop_all_devices_on_lcu(lcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* prepare for lcu_update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) lcu->suc_data.reason = private->suc_reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) lcu->suc_data.device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) dasd_get_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!schedule_work(&lcu->suc_data.worker))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) spin_unlock_irqrestore(&lcu->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) clear_bit(DASD_FLAG_SUC, &device->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) dasd_put_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }