^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * finite state machine for device handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright IBM Corp. 2002, 2008
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Martin Schwidefsky (schwidefsky@de.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/ccwdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/cio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/chpid.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "cio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "cio_debug.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "css.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "device.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "chsc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "ioasm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "chp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) static int timeout_log_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) static int __init ccw_timeout_log_setup(char *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) timeout_log_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) __setup("ccw_timeout_log", ccw_timeout_log_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/*
 * Dump diagnostic state (ORB, channel program, schib, device flags) to the
 * kernel log after a ccw device timeout.  Only called when the user enabled
 * it via the "ccw_timeout_log" kernel parameter.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Fetch current subchannel status; cc is reported further below. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		/* Transport mode: the ORB points at a TCW, not a CCW chain. */
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		/*
		 * Distinguish channel programs built internally by the
		 * common I/O layer (sense ccw / initialization ccws) from
		 * those submitted by the device driver.
		 */
		if ((void *)(addr_t)orb->cmd.cpa ==
		    &private->dma_area->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	/* cc from the stsch() above; nonzero means the dump may be stale. */
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) */
void
ccw_device_timeout(struct timer_list *t)
{
	/* Recover the private structure embedding this timer. */
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	/* The fsm must run under the ccw device lock. */
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * Set timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) ccw_device_set_timeout(struct ccw_device *cdev, int expires)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) if (expires == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) del_timer(&cdev->private->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) if (timer_pending(&cdev->private->timer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (mod_timer(&cdev->private->timer, jiffies + expires))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) cdev->private->timer.expires = jiffies + expires;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) add_timer(&cdev->private->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) ccw_device_cancel_halt_clear(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (ret == -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) cdev->private->dev_id.ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) cdev->private->dev_id.devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) void ccw_device_update_sense_data(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) memset(&cdev->id, 0, sizeof(cdev->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) int ccw_device_test_sense_data(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) return cdev->id.cu_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) cdev->private->dma_area->senseid.cu_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) cdev->id.cu_model ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) cdev->private->dma_area->senseid.cu_model &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) cdev->id.dev_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) cdev->private->dma_area->senseid.dev_type &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) cdev->id.dev_model ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) cdev->private->dma_area->senseid.dev_model;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * The machine won't give us any notification by machine check if a chpid has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * been varied online on the SE so we have to find out by magic (i. e. driving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * the channel subsystem to device selection and updating our path masks).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) __recover_lost_chpids(struct subchannel *sch, int old_lpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) int mask, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) struct chp_id chpid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) chp_id_init(&chpid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) for (i = 0; i<8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) mask = 0x80 >> i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) if (!(sch->lpm & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (old_lpm & mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) chpid.id = sch->schib.pmcw.chpid[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) if (!chp_is_registered(chpid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) css_schedule_eval_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * Stop device recognition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	/* Recognition is finished; turn the subchannel off again. */
	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	/*
	 * A disconnected device that is still not reachable stays
	 * disconnected; just note that recognition completed and wake up
	 * anyone waiting on it.
	 */
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			/* Same device as before: bring it back online. */
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			/* Device changed: rebind it to a matching driver. */
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * Function called from device_id.c after sense id has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ccw_device_sense_id_done(struct ccw_device *cdev, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) case -ETIME: /* Sense id stopped by timeout. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) ccw_device_recog_done(cdev, DEV_STATE_BOXED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * ccw_device_notify() - inform the device's driver about an event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * @cdev: device for which an event occurred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * @event: event that occurred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * -%EINVAL if the device is offline or has no driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * -%EOPNOTSUPP if the device's driver has no notifier registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * %NOTIFY_OK if the driver wants to keep the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * %NOTIFY_BAD if the driver doesn't want to keep the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) int ccw_device_notify(struct ccw_device *cdev, int event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) if (!cdev->drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) if (!cdev->online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (!cdev->drv->notify) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) if (cdev->drv->notify(cdev, event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) ret = NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) ret = NOTIFY_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) static void ccw_device_oper_notify(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) /* Reenable channel measurements, if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) /* Save indication for new paths. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) cdev->private->path_new_mask = sch->vpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) /* Driver doesn't want device back. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) ccw_device_set_notoper(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * Finished with online/offline processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	/* The pending operation is over; stop its timeout timer. */
	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		/* Unregister unless the driver keeps the boxed device. */
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	/* Deferred "operational again" notification requested earlier. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * Start device recognition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	/* The subchannel address doubles as the interruption parameter. */
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * Handle events for states that use the ccw request infrastructure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) switch (e) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) case DEV_EVENT_NOTOPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) ccw_request_notoper(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) case DEV_EVENT_INTERRUPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) ccw_request_handler(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) case DEV_EVENT_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ccw_request_timeout(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) static void ccw_device_report_path_events(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) int path_event[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) int chp, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) path_event[chp] = PE_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) path_event[chp] |= PE_PATH_GONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (mask & cdev->private->path_new_mask & sch->vpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) path_event[chp] |= PE_PATH_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (mask & cdev->private->pgid_reset_mask & sch->vpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (cdev->online && cdev->drv->path_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) cdev->drv->path_event(cdev, path_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) static void ccw_device_reset_path_events(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) cdev->private->path_gone_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) cdev->private->path_new_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) cdev->private->pgid_reset_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) static void create_fake_irb(struct irb *irb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) memset(irb, 0, sizeof(*irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (type == FAKE_CMD_IRB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) struct cmd_scsw *scsw = &irb->scsw.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) scsw->cc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) scsw->fctl = SCSW_FCTL_START_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) scsw->actl = SCSW_ACTL_START_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) scsw->stctl = SCSW_STCTL_STATUS_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) } else if (type == FAKE_TM_IRB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) struct tm_scsw *scsw = &irb->scsw.tm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) scsw->x = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) scsw->cc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) scsw->fctl = SCSW_FCTL_START_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) scsw->actl = SCSW_ACTL_START_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) scsw->stctl = SCSW_STCTL_STATUS_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) ccw_device_schedule_recovery();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) cdev->private->path_broken_mask = broken_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) void ccw_device_verify_done(struct ccw_device *cdev, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) /* Update schib - pom may have changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (cio_update_schib(sch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) goto callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* Update lpm with verified path mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) sch->lpm = sch->vpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) /* Repeat path verification? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (cdev->private->flags.doverify) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) ccw_device_verify_start(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) callback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) ccw_device_done(cdev, DEV_STATE_ONLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) /* Deliver fake irb to device driver, if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (cdev->private->flags.fake_irb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) create_fake_irb(&cdev->private->dma_area->irb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) cdev->private->flags.fake_irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) cdev->private->flags.fake_irb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) cdev->handler(cdev, cdev->private->intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) &cdev->private->dma_area->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) memset(&cdev->private->dma_area->irb, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) sizeof(struct irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) ccw_device_report_path_events(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) ccw_device_handle_broken_paths(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) case -ETIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) case -EUSERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) /* Reset oper notify indication after verify error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) cdev->private->flags.donotify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) ccw_device_done(cdev, DEV_STATE_BOXED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) case -EACCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* Reset oper notify indication after verify error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) cdev->private->flags.donotify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) /* Reset oper notify indication after verify error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) cdev->private->flags.donotify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) ccw_device_done(cdev, DEV_STATE_NOT_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) ccw_device_reset_path_events(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) * Get device online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) ccw_device_online(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if ((cdev->private->state != DEV_STATE_OFFLINE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) (cdev->private->state != DEV_STATE_BOXED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* Couldn't enable the subchannel for i/o. Sick device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (ret == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) /* Start initial path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) cdev->private->state = DEV_STATE_VERIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ccw_device_verify_start(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) ccw_device_disband_done(struct ccw_device *cdev, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ccw_device_done(cdev, DEV_STATE_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) case -ETIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ccw_device_done(cdev, DEV_STATE_BOXED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) cdev->private->flags.donotify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) ccw_device_done(cdev, DEV_STATE_NOT_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * Shutdown device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) ccw_device_offline(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /* Allow ccw_device_offline while disconnected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) if (cdev->private->state == DEV_STATE_DISCONNECTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) cdev->private->state == DEV_STATE_NOT_OPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) cdev->private->flags.donotify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) ccw_device_done(cdev, DEV_STATE_NOT_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (cdev->private->state == DEV_STATE_BOXED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) ccw_device_done(cdev, DEV_STATE_BOXED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (ccw_device_is_orphan(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) ccw_device_done(cdev, DEV_STATE_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (cio_update_schib(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if (scsw_actl(&sch->schib.scsw) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) if (cdev->private->state != DEV_STATE_ONLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) /* Are we doing path grouping? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (!cdev->private->flags.pgroup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* No, set state offline immediately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) ccw_device_done(cdev, DEV_STATE_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /* Start Set Path Group commands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) cdev->private->state = DEV_STATE_DISBAND_PGID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) ccw_device_disband_start(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * Handle not operational event in non-special state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) static void ccw_device_generic_notoper(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) ccw_device_set_disconnected(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * Handle path verification event in offline state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) static void ccw_device_offline_verify(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) css_schedule_eval(sch->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * Handle path verification event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (cdev->private->state == DEV_STATE_W4SENSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) cdev->private->flags.doverify = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) * Since we might not just be coming from an interrupt from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * subchannel we have to update the schib.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (cio_update_schib(sch)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) ccw_device_verify_done(cdev, -ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (scsw_actl(&sch->schib.scsw) != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) SCSW_STCTL_STATUS_PEND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * No final status yet or final status not yet delivered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * to the device driver. Can't do path verification now,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * delay until final status was delivered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) cdev->private->flags.doverify = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) /* Device is idle, we can do the path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) cdev->private->state = DEV_STATE_VERIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) ccw_device_verify_start(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * Handle path verification event in boxed state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static void ccw_device_boxed_verify(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) if (cdev->online) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ccw_device_done(cdev, DEV_STATE_NOT_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) ccw_device_online_verify(cdev, dev_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) css_schedule_eval(sch->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * Pass interrupt to device driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) static int ccw_device_call_handler(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) unsigned int stctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) int ending_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * we allow for the device action handler if .
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * - we received ending status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * - the action handler requested to see all interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * - we received an intermediate status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * - fast notification was requested (primary status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * - unsolicited interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) (stctl == SCSW_STCTL_STATUS_PEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (!ending_status &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) !cdev->private->options.repall &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) !(stctl & SCSW_STCTL_INTER_STATUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) !(cdev->private->options.fast &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) (stctl & SCSW_STCTL_PRIM_STATUS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (ending_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ccw_device_set_timeout(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) cdev->handler(cdev, cdev->private->intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) &cdev->private->dma_area->irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * Got an interrupt for a normal io (state online).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) struct irb *irb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int is_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) irb = this_cpu_ptr(&cio_irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) is_cmd = !scsw_is_tm(&irb->scsw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* Check for unsolicited interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (!scsw_is_solicited(&irb->scsw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) !irb->esw.esw0.erw.cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* Unit check but no sense data. Need basic sense. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (ccw_device_do_sense(cdev, irb) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) goto call_handler_unsol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) memcpy(&cdev->private->dma_area->irb, irb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) sizeof(struct irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) cdev->private->state = DEV_STATE_W4SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) cdev->private->intparm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) call_handler_unsol:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) cdev->handler (cdev, 0, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (cdev->private->flags.doverify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ccw_device_online_verify(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* Accumulate status and find out if a basic sense is needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ccw_device_accumulate_irb(cdev, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (is_cmd && cdev->private->flags.dosense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (ccw_device_do_sense(cdev, irb) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) cdev->private->state = DEV_STATE_W4SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /* Call the handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* Start delayed path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ccw_device_online_verify(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * Got an timeout in online state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ccw_device_set_timeout(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cdev->private->iretry = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) cdev->private->async_kill_io_rc = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ret = ccw_device_cancel_halt_clear(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (ret == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ccw_device_set_timeout(cdev, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) cdev->private->state = DEV_STATE_TIMEOUT_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) else if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) cdev->handler(cdev, cdev->private->intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ERR_PTR(-ETIMEDOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * Got an interrupt for a basic sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct irb *irb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) irb = this_cpu_ptr(&cio_irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /* Check for unsolicited interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (scsw_stctl(&irb->scsw) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (scsw_cc(&irb->scsw) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Basic sense hasn't started. Try again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ccw_device_do_sense(cdev, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) "interrupt during w4sense...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cdev->private->dev_id.ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) cdev->private->dev_id.devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) cdev->handler (cdev, 0, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * Check if a halt or clear has been issued in the meanwhile. If yes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * only deliver the halt/clear interrupt to the device driver as if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * had killed the original request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (scsw_fctl(&irb->scsw) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) cdev->private->flags.dosense = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ccw_device_accumulate_irb(cdev, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) goto call_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /* Add basic sense info to irb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ccw_device_accumulate_basic_sense(cdev, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (cdev->private->flags.dosense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* Another basic sense is needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ccw_device_do_sense(cdev, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) call_handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cdev->private->state = DEV_STATE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) /* In case sensing interfered with setting the device online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) wake_up(&cdev->private->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Call the handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* Start delayed path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ccw_device_online_verify(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ccw_device_set_timeout(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* Start delayed path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ccw_device_online_verify(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* OK, i/o is dead now. Call interrupt handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) cdev->handler(cdev, cdev->private->intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ERR_PTR(cdev->private->async_kill_io_rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ret = ccw_device_cancel_halt_clear(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (ret == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) ccw_device_set_timeout(cdev, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Start delayed path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ccw_device_online_verify(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) cdev->handler(cdev, cdev->private->intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) ERR_PTR(cdev->private->async_kill_io_rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) void ccw_device_kill_io(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ccw_device_set_timeout(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) cdev->private->iretry = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) cdev->private->async_kill_io_rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret = ccw_device_cancel_halt_clear(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (ret == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ccw_device_set_timeout(cdev, 3*HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) cdev->private->state = DEV_STATE_TIMEOUT_KILL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* Start delayed path verification. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ccw_device_online_verify(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (cdev->handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) cdev->handler(cdev, cdev->private->intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ERR_PTR(-EIO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Start verification after current task finished. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) cdev->private->flags.doverify = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* Couldn't enable the subchannel for i/o. Sick device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ccw_device_sense_id_start(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) void ccw_device_trigger_reprobe(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (cdev->private->state != DEV_STATE_DISCONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* Update some values. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (cio_update_schib(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * The pim, pam, pom values may not be accurate, but they are the best
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) * we have before performing device selection :/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) sch->lpm = sch->schib.pmcw.pam & sch->opm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Use the initial configuration since we can't be shure that the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * paths are valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) io_subchannel_init_config(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (cio_commit_config(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /* We should also udate ssd info, but this has to wait. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* Check if this is another device which appeared on the same sch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) css_schedule_eval(sch->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ccw_device_start_id(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static void ccw_device_disabled_irq(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * An interrupt in a disabled state means a previous disable was not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * successful - should not happen, but we try to disable again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) cio_disable_subchannel(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) retry_set_schib(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) cdev->private->state = DEV_STATE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dev_fsm_event(cdev, dev_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static void ccw_device_update_cmfblock(struct ccw_device *cdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) cmf_retry_copy_block(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) cdev->private->state = DEV_STATE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dev_fsm_event(cdev, dev_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ccw_device_set_timeout(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) cdev->private->state = DEV_STATE_NOT_OPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) wake_up(&cdev->private->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ret = ccw_device_cancel_halt_clear(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (ret == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ccw_device_set_timeout(cdev, HZ/10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) cdev->private->state = DEV_STATE_NOT_OPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) wake_up(&cdev->private->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * No operation action. This is used e.g. to ignore a timeout event in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * state offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * device statemachine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) [DEV_STATE_NOT_OPER] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) [DEV_EVENT_NOTOPER] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) [DEV_EVENT_TIMEOUT] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) [DEV_EVENT_VERIFY] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) [DEV_STATE_SENSE_ID] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) [DEV_EVENT_NOTOPER] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) [DEV_EVENT_VERIFY] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) [DEV_STATE_OFFLINE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) [DEV_EVENT_TIMEOUT] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) [DEV_STATE_VERIFY] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) [DEV_EVENT_NOTOPER] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) [DEV_STATE_ONLINE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) [DEV_EVENT_INTERRUPT] = ccw_device_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) [DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) [DEV_EVENT_VERIFY] = ccw_device_online_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) [DEV_STATE_W4SENSE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) [DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) [DEV_EVENT_TIMEOUT] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) [DEV_EVENT_VERIFY] = ccw_device_online_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) [DEV_STATE_DISBAND_PGID] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) [DEV_EVENT_NOTOPER] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) [DEV_EVENT_VERIFY] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) [DEV_STATE_BOXED] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) [DEV_EVENT_INTERRUPT] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) [DEV_EVENT_TIMEOUT] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* states to wait for i/o completion before doing something */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) [DEV_STATE_TIMEOUT_KILL] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) [DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) [DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) [DEV_STATE_QUIESCE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) [DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) [DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) [DEV_EVENT_VERIFY] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /* special states for devices gone not operational */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) [DEV_STATE_DISCONNECTED] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) [DEV_EVENT_NOTOPER] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) [DEV_EVENT_TIMEOUT] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) [DEV_EVENT_VERIFY] = ccw_device_start_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) [DEV_STATE_DISCONNECTED_SENSE_ID] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) [DEV_EVENT_NOTOPER] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) [DEV_EVENT_VERIFY] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) [DEV_STATE_CMFCHANGE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) [DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) [DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) [DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) [DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) [DEV_STATE_CMFUPDATE] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) [DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) [DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) [DEV_STATE_STEAL_LOCK] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) [DEV_EVENT_NOTOPER] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) [DEV_EVENT_VERIFY] = ccw_device_nop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) EXPORT_SYMBOL_GPL(ccw_device_set_timeout);