// SPDX-License-Identifier: GPL-2.0
/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
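/*
 * Delays (in seconds) used to back off consecutive device recovery
 * attempts (see recovery_func()).
 */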
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
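/*
 * Example (illustrative values): a control unit of type 0x3990, model 0xE9,
 * with an attached device of type 0x3390, model 0x0A, yields the alias
 * "ccw:t3990mE9dt3390dm0A"; with dev_type 0 the alias ends in "dtdm".
 */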

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

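/*
 * Wait until outstanding device recognition has finished and queued todo
 * work has drained; used when the channel subsystem is settled (e.g. at
 * boot or via the cio_settle interface).
 */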
static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

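/*
 * A ccw device is an "orphan" when it hangs off the channel subsystem's
 * pseudo subchannel rather than a real I/O subchannel, e.g. because its
 * subchannel went away while the device was kept for later reconnection.
 */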
int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *  %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
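	/*
	 * ccw_device_offline() returns -EBUSY while I/O is still pending;
	 * in that case quiesce the subchannel and retry.
	 */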
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *  %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

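/*
 * Handle a request to bring the device online. If @force is set and the
 * device remains boxed, try to break the outstanding reservation via
 * ccw_device_stlck() (steal lock) and retry recognition and onlining.
 */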
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

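/*
 * sysfs store for the "online" attribute. Illustrative usage (device bus
 * IDs vary):
 *   echo 1 > /sys/bus/ccw/devices/0.0.1234/online      # set online
 *   echo 0 > /sys/bus/ccw/devices/0.0.1234/online      # set offline
 *   echo force > /sys/bus/ccw/devices/0.0.1234/online  # force boxed device
 */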
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing.*/
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}

static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}
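/*
 * Illustrative usage (subchannel paths vary); any write triggers logging:
 *   echo 1 > /sys/devices/css0/0.0.0008/logging
 */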

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
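/*
 * Illustrative lookup (field values are examples); the caller owns the
 * returned reference and must drop it with put_device():
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */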

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

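/*
 * Allocate a ccw device plus its private data (GFP_DMA: on s390 several
 * channel I/O control blocks must reside in 31-bit addressable storage)
 * and create a per-device gen_pool backing the device's DMA area.
 */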
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct ccw_device *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct gen_pool *dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) if (!cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) goto err_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) cdev->private = kzalloc(sizeof(struct ccw_device_private),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) if (!cdev->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) goto err_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) cdev->dev.dma_mask = sch->dev.dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) dma_pool = cio_gp_dma_create(&cdev->dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (!dma_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) goto err_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) cdev->private->dma_pool = dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) sizeof(*cdev->private->dma_area));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (!cdev->private->dma_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) goto err_dma_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) err_dma_area:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) cio_gp_dma_destroy(dma_pool, &cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) err_dma_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) kfree(cdev->private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) err_priv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) kfree(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) err_cdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) static void ccw_device_todo(struct work_struct *work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) static int io_subchannel_initialize_dev(struct subchannel *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct ccw_device_private *priv = cdev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) priv->cdev = cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) priv->int_class = IRQIO_CIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) priv->state = DEV_STATE_NOT_OPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) priv->dev_id.devno = sch->schib.pmcw.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) priv->dev_id.ssid = sch->schid.ssid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) INIT_WORK(&priv->todo_work, ccw_device_todo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) INIT_LIST_HEAD(&priv->cmb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) init_waitqueue_head(&priv->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) timer_setup(&priv->timer, ccw_device_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) atomic_set(&priv->onoff, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) cdev->ccwlock = sch->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) cdev->dev.parent = &sch->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) cdev->dev.release = ccw_device_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) cdev->dev.groups = ccwdev_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /* Do first half of device_register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) device_initialize(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) cdev->private->dev_id.devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!get_device(&sch->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) priv->flags.initialized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) sch_set_cdev(sch, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* Release reference from device_initialize(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct ccw_device *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) cdev = io_subchannel_allocate_dev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!IS_ERR(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) ret = io_subchannel_initialize_dev(sch, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) cdev = ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * Subchannel recognition done. Called from the state machine.
 */
void io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

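/*
 * Move a ccw_device to a different subchannel. Rough sequence, as
 * implemented below: obtain a child reference for the new parent,
 * disable the old subchannel, move the device in the driver core,
 * detach it from the old subchannel and schedule that subchannel for
 * re-evaluation, then attach the device to the new subchannel and take
 * over its lock. On failure the old state is restored as far as
 * possible.
 */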
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

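/*
 * Move the device to the orphanage, i.e. the pseudo subchannel of the
 * channel subsystem. Devices end up there when their subchannel goes
 * away while the device itself should be kept (see the IO_SCH_ORPH_*
 * actions below).
 */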
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

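/*
 * Interrupt handler for I/O subchannels: forward the interrupt to the
 * device state machine if a ccw_device is attached; otherwise the
 * interrupt is merely accounted in the irq statistics.
 */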
static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

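/*
 * Set up the desired subchannel configuration: everything off except
 * concurrent sense (csense), which is wanted for all I/O subchannels.
 */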
void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		if (dev_get_uevent_suppress(&sch->dev)) {
			/* should always be the case for the console */
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

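/*
 * Handle a channel-path related event for this subchannel: adjust the
 * operational (opm) and logical (lpm) path masks according to the
 * event, record gone/new paths in the device's path masks, and trigger
 * path termination or verification as appropriate.
 */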
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

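/*
 * Quiesce the subchannel: terminate any outstanding I/O (notifying the
 * device's interrupt handler with -EIO) and retry a cancel/halt/clear
 * sequence, waiting in between, until the subchannel can finally be
 * disabled. The console subchannel is left untouched.
 */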
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

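/*
 * Per-device callback for the recovery work: trigger path verification
 * for online devices that have lost paths and for disconnected devices.
 * *redo is set whenever at least one device may still need another
 * recovery pass.
 */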
static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

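/*
 * Run a recovery pass over all ccw devices. As long as some device
 * still needs recovery, re-arm the timer with the next, longer delay
 * from the recovery_delay[] table, giving a simple escalating back-off.
 */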
static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

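/*
 * Per-device callback for ccw_purge_blacklisted(): schedule
 * unregistration for devices that are offline and blacklisted. The
 * onoff atomic serves as a guard so that a device currently being set
 * online or offline is skipped.
 */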
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

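/*
 * Possible reactions to a subchannel event, as chosen by
 * sch_get_action(): unregister the subchannel (UNREG), move the device
 * to the orphanage first (ORPH_*), attach a device to the subchannel
 * (the ATTACH variants), reprobe or verify the existing device, mark
 * it disconnected (DISC), or do nothing (NOP).
 */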
enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/*
	 * Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class.
	 */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
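/*
 * Bring the console ccw_device online. This runs early during boot, so
 * recognition and onlining are driven synchronously: the code waits via
 * ccw_device_wait_idle() until the device state machine reaches a final
 * state, instead of relying on the regular interrupt-driven paths.
 */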
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) struct io_subchannel_private *io_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct ccw_device *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sch = cio_probe_console();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (IS_ERR(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) return ERR_CAST(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (!io_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) goto err_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) io_priv->dma_area = dma_alloc_coherent(&sch->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) sizeof(*io_priv->dma_area),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) &io_priv->dma_area_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (!io_priv->dma_area)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) goto err_dma_area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) set_io_private(sch, io_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) cdev = io_subchannel_create_ccwdev(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (IS_ERR(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) io_priv->dma_area, io_priv->dma_area_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) set_io_private(sch, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) put_device(&sch->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) kfree(io_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) cdev->drv = drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ccw_device_set_int_class(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) return cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) err_dma_area:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) kfree(io_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) err_priv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) put_device(&sch->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
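
/*
 * A minimal console bring-up sketch (compare the 3215/3270 console
 * drivers); my_console_driver and my_console_handler are hypothetical:
 *
 *	struct ccw_device *cdev;
 *	int rc;
 *
 *	cdev = ccw_device_create_console(&my_console_driver);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *	cdev->handler = my_console_handler;
 *	rc = ccw_device_enable_console(cdev);
 *	if (rc) {
 *		ccw_device_destroy_console(cdev);
 *		return rc;
 *	}
 *
 * Note that ccw_device_enable_console() fails with -EINVAL unless both
 * ->drv and ->handler are set, so the handler must be assigned first.
 */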
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) void __init ccw_device_destroy_console(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct io_subchannel_private *io_priv = to_io_private(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) set_io_private(sch, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) io_priv->dma_area, io_priv->dma_area_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) put_device(&sch->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) kfree(io_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * ccw_device_wait_idle() - busy wait for device to become idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * @cdev: ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) *
 * Poll until the activity control is zero, that is, until no function or
 * data transfer is pending or active.
 * Must be called with the device lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) void ccw_device_wait_idle(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) cio_tsch(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (sch->schib.scsw.cmd.actl == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) udelay_simple(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
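
/*
 * The expected call pattern, as used in the console bring-up above: take
 * the ccwlock and poll until the device FSM has reached a final state:
 *
 *	spin_lock_irq(cdev->ccwlock);
 *	while (!dev_fsm_final_state(cdev))
 *		ccw_device_wait_idle(cdev);
 *	spin_unlock_irq(cdev->ccwlock);
 */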
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static int ccw_device_pm_restore(struct device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) int ccw_device_force_console(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return ccw_device_pm_restore(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) EXPORT_SYMBOL_GPL(ccw_device_force_console);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * get_ccwdev_by_busid() - obtain device from a bus id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * This function searches all devices owned by @cdrv for a device with a bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * id matching @bus_id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * Returns:
 * If a match is found, the reference count of the found device is increased
 * and the device is returned; else %NULL is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) const char *bus_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) dev = driver_find_device_by_name(&cdrv->driver, bus_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return dev ? to_ccwdev(dev) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
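
/*
 * Hypothetical lookup sketch: the device returned by get_ccwdev_by_busid()
 * carries an extra reference that the caller must drop with put_device():
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(&my_driver, "0.0.4711");
 *	if (cdev) {
 *		dev_info(&cdev->dev, "found device\n");
 *		put_device(&cdev->dev);
 *	}
 */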
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /************************** device driver handling ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
/*
 * This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are essentially the same as the generic device_driver
 * implementations, with the difference that they take ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int ccw_device_probe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) cdev->drv = cdrv; /* to let the driver call _set_online */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) ccw_device_set_int_class(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) cdev->drv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) cdev->private->int_class = IRQIO_CIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) static int ccw_device_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) struct ccw_driver *cdrv = cdev->drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (cdrv->remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) cdrv->remove(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) spin_lock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (cdev->online) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) cdev->online = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) ret = ccw_device_offline(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) wait_event(cdev->private->wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dev_fsm_final_state(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) "device 0.%x.%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) ret, cdev->private->dev_id.ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) cdev->private->dev_id.devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* Give up reference obtained in ccw_device_set_online(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) spin_lock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ccw_device_set_timeout(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) cdev->drv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) cdev->private->int_class = IRQIO_CIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) io_subchannel_quiesce(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) __disable_cmf(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) static void ccw_device_shutdown(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) struct ccw_device *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (cdev->drv && cdev->drv->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) cdev->drv->shutdown(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) __disable_cmf(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) static int ccw_device_pm_prepare(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (work_pending(&cdev->private->todo_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /* Fail while device is being set online/offline. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (atomic_read(&cdev->private->onoff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (cdev->online && cdev->drv && cdev->drv->prepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return cdev->drv->prepare(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static void ccw_device_pm_complete(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (cdev->online && cdev->drv && cdev->drv->complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) cdev->drv->complete(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) static int ccw_device_pm_freeze(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int ret, cm_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
	/* Fail suspend while device is in transitional state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (!dev_fsm_final_state(cdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (!cdev->online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) if (cdev->drv && cdev->drv->freeze) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) ret = cdev->drv->freeze(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) cm_enabled = cdev->private->cmb != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (cm_enabled) {
		/* Don't let the css write to memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) ret = ccw_set_cmf(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /* From here on, disallow device driver I/O. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) ret = cio_disable_subchannel(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) static int ccw_device_pm_thaw(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) int ret, cm_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (!cdev->online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /* Allow device driver I/O again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) cm_enabled = cdev->private->cmb != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (cm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) ret = ccw_set_cmf(cdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (cdev->drv && cdev->drv->thaw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ret = cdev->drv->thaw(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static void __ccw_device_pm_restore(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (cio_is_console(sch->schid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) cio_enable_subchannel(sch, (u32)(addr_t)sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * While we were sleeping, devices may have gone or become
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * available again. Kick re-detection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) cdev->private->flags.resuming = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) cdev->private->path_new_mask = LPM_ANYPATH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) css_sched_sch_todo(sch, SCH_TODO_EVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) css_wait_for_slow_path();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /* cdev may have been moved to a different subchannel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (cdev->private->state != DEV_STATE_ONLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) cdev->private->state != DEV_STATE_OFFLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) ccw_device_recognition(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) cdev->private->state == DEV_STATE_DISCONNECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) cdev->private->flags.resuming = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int resume_handle_boxed(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) cdev->private->state = DEV_STATE_BOXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static int resume_handle_disc(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) cdev->private->state = DEV_STATE_DISCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) static int ccw_device_pm_restore(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) struct ccw_device *cdev = to_ccwdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) __ccw_device_pm_restore(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (cio_is_console(sch->schid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) goto out_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) /* check recognition results */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) switch (cdev->private->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) case DEV_STATE_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) case DEV_STATE_ONLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) cdev->private->flags.donotify = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) case DEV_STATE_BOXED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) ret = resume_handle_boxed(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) goto out_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) ret = resume_handle_disc(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) goto out_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* check if the device type has changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (!ccw_device_test_sense_data(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) ccw_device_update_sense_data(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (!cdev->online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (ccw_device_online(cdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) ret = resume_handle_disc(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) goto out_restore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /* reenable cmf, if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (cdev->private->cmb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ret = ccw_set_cmf(cdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) spin_lock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) "(rc=%d)\n", cdev->private->dev_id.ssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) cdev->private->dev_id.devno, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) out_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (cdev->online && cdev->drv && cdev->drv->restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) ret = cdev->drv->restore(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) spin_unlock_irq(sch->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) static const struct dev_pm_ops ccw_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) .prepare = ccw_device_pm_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) .complete = ccw_device_pm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) .freeze = ccw_device_pm_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) .thaw = ccw_device_pm_thaw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) .restore = ccw_device_pm_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static struct bus_type ccw_bus_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) .name = "ccw",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) .match = ccw_bus_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) .uevent = ccw_uevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) .probe = ccw_device_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) .remove = ccw_device_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) .shutdown = ccw_device_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) .pm = &ccw_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) * ccw_driver_register() - register a ccw driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) * @cdriver: driver to be registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * This function is mainly a wrapper around driver_register().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * %0 on success and a negative error value on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) int ccw_driver_register(struct ccw_driver *cdriver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) struct device_driver *drv = &cdriver->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) drv->bus = &ccw_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return driver_register(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
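
/*
 * A registration sketch following the usual module conventions; the id
 * table, names and callbacks are hypothetical. CCW_DEVICE() matches on
 * control unit type/model, and the id table is terminated by an empty
 * entry:
 *
 *	static struct ccw_device_id my_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0xe9) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, my_ids);
 *
 *	static struct ccw_driver my_driver = {
 *		.driver		= { .name = "my_ccw", .owner = THIS_MODULE },
 *		.ids		= my_ids,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *		.set_online	= my_set_online,
 *		.set_offline	= my_set_offline,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return ccw_driver_register(&my_driver);
 *	}
 *	module_init(my_init);
 *
 * The matching ccw_driver_unregister() call goes into the module exit
 * function.
 */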
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * ccw_driver_unregister() - deregister a ccw driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * @cdriver: driver to be deregistered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * This function is mainly a wrapper around driver_unregister().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) void ccw_driver_unregister(struct ccw_driver *cdriver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) driver_unregister(&cdriver->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static void ccw_device_todo(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct ccw_device_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct ccw_device *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) enum cdev_todo todo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) priv = container_of(work, struct ccw_device_private, todo_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) cdev = priv->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /* Find out todo. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) spin_lock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) todo = priv->todo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) priv->todo = CDEV_TODO_NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) priv->dev_id.ssid, priv->dev_id.devno, todo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) spin_unlock_irq(cdev->ccwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /* Perform todo. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) switch (todo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) case CDEV_TODO_ENABLE_CMF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) cmf_reenable(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) case CDEV_TODO_REBIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) ccw_device_do_unbind_bind(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) case CDEV_TODO_REGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) io_subchannel_register(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) case CDEV_TODO_UNREG_EVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (!sch_is_pseudo_sch(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) css_schedule_eval(sch->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) case CDEV_TODO_UNREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (sch_is_pseudo_sch(sch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) ccw_device_unregister(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) ccw_device_call_sch_unregister(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) /* Release workqueue ref. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) /**
 * ccw_device_sched_todo() - schedule ccw device operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * @cdev: ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) * @todo: todo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) * Schedule the operation identified by @todo to be performed on the slow path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Must be called with the ccwdev lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) todo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (cdev->private->todo >= todo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) cdev->private->todo = todo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /* Get workqueue ref. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (!get_device(&cdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /* Already queued, release workqueue ref. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
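
/*
 * Internal usage sketch: callers already hold the ccwdev lock, e.g.
 *
 *	spin_lock_irq(cdev->ccwlock);
 *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 *	spin_unlock_irq(cdev->ccwlock);
 *
 * Since the pending todo is only ever upgraded, a lower-priority request
 * such as CDEV_TODO_REBIND scheduled afterwards is ignored until the
 * queued todo_work has run and reset it to CDEV_TODO_NOTHING.
 */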
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * ccw_device_siosl() - initiate logging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * @cdev: ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * This function is used to invoke model-dependent logging within the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * subsystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) int ccw_device_siosl(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct subchannel *sch = to_subchannel(cdev->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) return chsc_siosl(sch->schid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) EXPORT_SYMBOL_GPL(ccw_device_siosl);
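
/*
 * Usage sketch: a driver might trigger model-dependent logging after
 * running into an unexpected device state, e.g.
 *
 *	if (ccw_device_siosl(cdev))
 *		dev_warn(&cdev->dev, "logging not activated\n");
 */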
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) EXPORT_SYMBOL(ccw_device_set_online);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) EXPORT_SYMBOL(ccw_device_set_offline);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) EXPORT_SYMBOL(ccw_driver_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) EXPORT_SYMBOL(ccw_driver_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) EXPORT_SYMBOL(get_ccwdev_by_busid);