// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

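/*
 * Call @fn for each possible subchannel id in each subchannel set, up to
 * max_ssid. A non-zero return value from @fn ends the scan of the current
 * subchannel set; the last value returned by @fn is passed back to the
 * caller.
 */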
int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

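/*
 * Callback bundle for the staged traversal below: registered ("known")
 * subchannels are handled via fn_known_sch, unregistered ones via
 * fn_unknown_sch, and @set tracks which subchannel ids are still pending.
 */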
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

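/*
 * Two-stage subchannel traversal: first walk all subchannels registered on
 * the css bus, then evaluate the remaining ids that were not seen during
 * the first stage. An idset tracks which ids are still pending; if no set
 * can be allocated, fall back to a brute-force scan over all ids.
 */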
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

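/*
 * Check whether an id/schib pair describes a usable subchannel: I/O and
 * message subchannels must be valid and must not carry a blacklisted
 * device number; all other subchannel types are accepted as-is.
 */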
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

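/*
 * Allocate and initialize a subchannel structure for the given id/schib,
 * including its lock, todo work item and DMA masks. Returns an ERR_PTR
 * on failure.
 */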
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses of some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
	/*
	 * Such restrictions do not apply to the memory handled by the
	 * streaming DMA API.
	 */
	sch->dma_mask = DMA_BIT_MASK(64);
	sch->dev.dma_mask = &sch->dma_mask;
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

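/*
 * Register the subchannel with the driver core under the bus id
 * 0.<ssid>.<schno>, serialized against unregistration via reg_mutex.
 */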
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

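/*
 * Build a minimal ssd info block from the pmcw: take the installed path
 * mask and the channel path ids directly from the hardware description.
 * Used as a fallback when chsc_get_ssd_info() fails.
 */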
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

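/*
 * Refresh the cached ssd info of a subchannel and make sure channel path
 * objects exist for every channel path it uses.
 */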
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

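/*
 * Writing a driver name to this attribute restricts the subchannel to
 * that driver; writing an empty string clears the override again. The
 * stored string is consulted by the bus' driver matching logic.
 */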
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

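/*
 * Show the channel path ids of a subchannel, one two-digit hex id per
 * possible path; positions not set in the path mask read as 00.
 */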
static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

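/*
 * Make a freshly allocated subchannel known to the driver core and, where
 * appropriate, to user space; see the comments below for the uevent
 * handling.
 */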
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

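/*
 * Look up a registered subchannel by its id. The returned subchannel
 * carries a device reference that the caller must drop via put_device().
 */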
static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

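/*
 * Evaluate a subchannel id that has no struct subchannel yet. On the fast
 * path (slow == 0) the work is merely deferred via -EAGAIN; on the slow
 * path the schib is fetched and an operational subchannel is probed.
 */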
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

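/*
 * Evaluate a single subchannel id, dispatching to the known or new
 * variant as appropriate; -EAGAIN results are re-queued on the slow path.
 */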
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

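/*
 * The "slow path": an idset of subchannel ids whose evaluation has been
 * deferred, drained from a workqueue. css_eval_scheduled and css_eval_wq
 * allow waiters to block until the set is empty again.
 */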
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time for platforms with lots
		 * of known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/*
		 * Allow scheduling here since the containing loop might
		 * take a while.
		 */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

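/* Queue a single subchannel id for evaluation on the slow path. */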
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static void __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct cpuid cpu_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (css_general_characteristics.mcss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) css->global_pgid.pgid_high.ext_cssid.version = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) css->global_pgid.pgid_high.ext_cssid.cssid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) css->id_valid ? css->cssid : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) css->global_pgid.pgid_high.cpu_addr = stap();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) get_cpu_id(&cpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) css->global_pgid.cpu_id = cpu_id.ident;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) css->global_pgid.cpu_model = cpu_id.machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) css->global_pgid.tod_high = tod_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) static void channel_subsystem_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct channel_subsystem *css = to_css(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) mutex_destroy(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) kfree(css);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct channel_subsystem *css = to_css(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (!css->id_valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return sprintf(buf, "%x\n", css->cssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static DEVICE_ATTR_RO(real_cssid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct channel_subsystem *css = to_css(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) mutex_lock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret = sprintf(buf, "%x\n", css->cm_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct channel_subsystem *css = to_css(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned long val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) ret = kstrtoul(buf, 16, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) mutex_lock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return ret < 0 ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static DEVICE_ATTR_RW(cm_enable);
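
/*
 * From userspace, channel measurement is toggled through this attribute;
 * assuming the usual sysfs location of the css0 device:
 *
 *	echo 1 > /sys/devices/css0/cm_enable	# start measurements
 *	echo 0 > /sys/devices/css0/cm_enable	# stop measurements
 *
 * Reads of cm_enable return the current state as a single hex digit.
 */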
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return css_chsc_characteristics.secm ? attr->mode : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static struct attribute *cssdev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) &dev_attr_real_cssid.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) static struct attribute_group cssdev_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) .attrs = cssdev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static struct attribute *cssdev_cm_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) &dev_attr_cm_enable.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) static struct attribute_group cssdev_cm_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) .attrs = cssdev_cm_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) .is_visible = cm_enable_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static const struct attribute_group *cssdev_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) &cssdev_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) &cssdev_cm_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static int __init setup_css(int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct channel_subsystem *css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) css = kzalloc(sizeof(*css), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!css)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) channel_subsystems[nr] = css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) dev_set_name(&css->device, "css%x", nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) css->device.groups = cssdev_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) css->device.release = channel_subsystem_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * We currently allocate notifier bits with this (using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * css->device as the device argument with the DMA API)
	 * and are fine with 64-bit addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) css->device.coherent_dma_mask = DMA_BIT_MASK(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) css->device.dma_mask = &css->device.coherent_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) mutex_init(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) css->id_valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) pr_info("Partition identifier %01x.%01x\n", css->cssid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) css->iid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ret = device_register(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) put_device(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!css->pseudo_subchannel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) device_unregister(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) css->pseudo_subchannel->dev.parent = &css->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) css->pseudo_subchannel->dev.release = css_subchannel_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) mutex_init(&css->pseudo_subchannel->reg_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ret = css_sch_create_locks(css->pseudo_subchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) kfree(css->pseudo_subchannel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) device_unregister(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) dev_set_name(&css->pseudo_subchannel->dev, "defunct");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ret = device_register(&css->pseudo_subchannel->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) put_device(&css->pseudo_subchannel->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) device_unregister(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) channel_subsystems[nr] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int css_reboot_event(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) unsigned long event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct channel_subsystem *css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) for_each_css(css) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) mutex_lock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (css->cm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (chsc_secm(css, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ret = NOTIFY_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static struct notifier_block css_reboot_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) .notifier_call = css_reboot_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
/*
 * Since the css devices are not on a bus, do not have a class, and do
 * not have a special device type, we cannot stop/restart channel-path
 * measurements via the normal suspend/resume callbacks; we have to use
 * notifiers instead.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static int css_power_event(struct notifier_block *this, unsigned long event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct channel_subsystem *css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) case PM_HIBERNATION_PREPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) case PM_SUSPEND_PREPARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) for_each_css(css) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) mutex_lock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (!css->cm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ret = __chsc_do_secm(css, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = notifier_from_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) case PM_POST_HIBERNATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) case PM_POST_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) for_each_css(css) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) mutex_lock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!css->cm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ret = __chsc_do_secm(css, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) ret = notifier_from_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) mutex_unlock(&css->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
		/* Search for subchannels that appeared during hibernation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) css_schedule_reprobe();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static struct notifier_block css_power_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .notifier_call = css_power_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) #define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) static struct gen_pool *cio_dma_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Currently cio supports only a single css */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct device *cio_get_dma_css_dev(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return &channel_subsystems[0]->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct gen_pool *gp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) void *cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
	/* A min_alloc_order of 3 means 8-byte allocation granularity. */
	gp_dma = gen_pool_create(3, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (!gp_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) for (i = 0; i < nr_pages; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;	/* return the partially filled pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) dma_addr, PAGE_SIZE, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) return gp_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
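
/*
 * Lifetime sketch for a private pool, assuming the caller owns a
 * DMA-capable device (my_dma_dev is hypothetical):
 *
 *	struct gen_pool *pool;
 *
 *	pool = cio_gp_dma_create(my_dma_dev, 2);	// pre-fill two pages
 *	if (!pool)
 *		return -ENOMEM;
 *	// ... allocate with cio_gp_dma_zalloc(pool, my_dma_dev, size) ...
 *	cio_gp_dma_destroy(pool, my_dma_dev);		// frees every chunk
 */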
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static void __gp_dma_free_dma(struct gen_pool *pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct gen_pool_chunk *chunk, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dma_free_coherent((struct device *) data, chunk_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) (void *) chunk->start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) (dma_addr_t) chunk->phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (!gp_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return;
	/* This is quite ugly, but there is no better way to free the chunks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) gen_pool_destroy(gp_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int cio_dma_pool_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
	/* No need to free the pool on exit: this code is compiled in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!cio_dma_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) unsigned long addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) size_t chunk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!gp_dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) addr = gen_pool_alloc(gp_dma, size);
	/* Grow the pool one chunk at a time until the allocation succeeds. */
	while (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) chunk_size = round_up(size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) addr = (unsigned long) dma_alloc_coherent(dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) chunk_size, &dma_addr, CIO_DMA_GFP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) addr = gen_pool_alloc(gp_dma, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return (void *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!cpu_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return;
	/* Re-zero so future allocations from the pool hand out zeroed memory. */
	memset(cpu_addr, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
/*
 * Allocate DMA memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory is
 * not guaranteed to be 31-bit addressable.
 *
 * Caution: not suitable for early boot use such as the console.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) void *cio_dma_zalloc(size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) void cio_dma_free(void *cpu_addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
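
/*
 * Typical pairing against the global pool set up by cio_dma_pool_init()
 * (the size is illustrative):
 *
 *	void *buf = cio_dma_zalloc(64);	// zeroed, 64-bit DMA-capable
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf for css-wide DMA ...
 *	cio_dma_free(buf, 64);		// re-zeroed, returned to the pool
 */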
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
/*
 * Now that the driver core is running, we can set up our channel
 * subsystem. The struct subchannel instances are created during probing.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static int __init css_bus_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ret = chsc_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) chsc_determine_css_characteristics();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* Try to enable MSS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) max_ssid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) else /* Success. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) max_ssid = __MAX_SSID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ret = slow_subchannel_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /* Setup css structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) for (i = 0; i <= MAX_CSS_IDX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) ret = setup_css(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ret = register_reboot_notifier(&css_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) goto out_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ret = register_pm_notifier(&css_power_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) goto out_unregister_rn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) ret = cio_dma_pool_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) goto out_unregister_pmn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) airq_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) css_init_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /* Enable default isc for I/O subchannels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) isc_register(IO_SCH_ISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) out_unregister_pmn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) unregister_pm_notifier(&css_power_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) out_unregister_rn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) unregister_reboot_notifier(&css_reboot_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) out_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct channel_subsystem *css = channel_subsystems[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) device_unregister(&css->pseudo_subchannel->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) device_unregister(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) bus_unregister(&css_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) crw_unregister_handler(CRW_RSC_SCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) idset_free(slow_subchannel_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with errno=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static void __init css_bus_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct channel_subsystem *css;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) for_each_css(css) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) device_unregister(&css->pseudo_subchannel->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) device_unregister(&css->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) bus_unregister(&css_bus_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) crw_unregister_handler(CRW_RSC_SCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) idset_free(slow_subchannel_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) chsc_init_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) isc_unregister(IO_SCH_ISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static int __init channel_subsystem_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) ret = css_bus_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) cio_work_q = create_singlethread_workqueue("cio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (!cio_work_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) goto out_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) ret = io_subchannel_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) goto out_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* Register subchannels which are already in use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) cio_register_early_subchannels();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* Start initial subchannel evaluation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) css_schedule_eval_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) out_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) destroy_workqueue(cio_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) out_bus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) css_bus_cleanup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) subsys_initcall(channel_subsystem_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static int css_settle(struct device_driver *drv, void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct css_driver *cssdrv = to_cssdriver(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (cssdrv->settle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return cssdrv->settle();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) int css_complete_work(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* Wait for the evaluation of subchannels to finish. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) ret = wait_event_interruptible(css_eval_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) atomic_read(&css_eval_scheduled) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) flush_workqueue(cio_work_q);
	/* Wait for the subchannel-type-specific initialization to finish. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup before the search for the root device starts.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) static int __init channel_subsystem_init_sync(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) css_complete_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) subsys_initcall_sync(channel_subsystem_init_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static ssize_t cio_settle_write(struct file *file, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
	/* Handle pending CRWs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) crw_wait_for_channel_report();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) ret = css_complete_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static const struct proc_ops cio_settle_proc_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .proc_open = nonseekable_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .proc_write = cio_settle_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .proc_lseek = no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static int __init cio_settle_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) struct proc_dir_entry *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) device_initcall(cio_settle_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) #endif /*CONFIG_PROC_FS*/
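
/*
 * With CONFIG_PROC_FS enabled, userspace can wait for device detection
 * to settle by writing to the file created above, e.g.:
 *
 *	echo > /proc/cio_settle
 *
 * The write blocks until pending CRWs are handled and subchannel
 * evaluation has finished.
 */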
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int sch_is_pseudo_sch(struct subchannel *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (!sch->dev.parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return sch == to_css(sch->dev.parent)->pseudo_subchannel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int css_bus_match(struct device *dev, struct device_driver *drv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct css_driver *driver = to_cssdriver(drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct css_device_id *id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* When driver_override is set, only bind to the matching driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (sch->driver_override && strcmp(sch->driver_override, drv->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) for (id = driver->subchannel_type; id->match_flags; id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (sch->st == id->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static int css_probe(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) sch->driver = to_cssdriver(dev->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) sch->driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) static int css_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) sch->driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static void css_shutdown(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct subchannel *sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (sch->driver && sch->driver->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) sch->driver->shutdown(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) ret = add_uevent_var(env, "ST=%01X", sch->st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static int css_pm_prepare(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct css_driver *drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (mutex_is_locked(&sch->reg_mutex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (!sch->dev.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) drv = to_cssdriver(sch->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /* Notify drivers that they may not register children. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return drv->prepare ? drv->prepare(sch) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static void css_pm_complete(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct css_driver *drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) if (!sch->dev.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) drv = to_cssdriver(sch->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (drv->complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) drv->complete(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static int css_pm_freeze(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) struct css_driver *drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (!sch->dev.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) drv = to_cssdriver(sch->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return drv->freeze ? drv->freeze(sch) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static int css_pm_thaw(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct css_driver *drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (!sch->dev.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) drv = to_cssdriver(sch->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return drv->thaw ? drv->thaw(sch) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static int css_pm_restore(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct subchannel *sch = to_subchannel(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct css_driver *drv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) css_update_ssd_info(sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (!sch->dev.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) drv = to_cssdriver(sch->dev.driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return drv->restore ? drv->restore(sch) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static const struct dev_pm_ops css_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) .prepare = css_pm_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) .complete = css_pm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) .freeze = css_pm_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) .thaw = css_pm_thaw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) .restore = css_pm_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static struct bus_type css_bus_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) .name = "css",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) .match = css_bus_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) .probe = css_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) .remove = css_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) .shutdown = css_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) .uevent = css_uevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) .pm = &css_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets the bus_type
 * in the embedded struct device_driver correctly.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) int css_driver_register(struct css_driver *cdrv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) cdrv->drv.bus = &css_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return driver_register(&cdrv->drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) EXPORT_SYMBOL_GPL(css_driver_register);
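
/*
 * Minimal registration sketch for a subchannel-type driver; the driver,
 * its callbacks and the match table below are hypothetical:
 *
 *	static struct css_device_id my_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver my_css_driver = {
 *		.drv = {
 *			.name  = "my_css_drv",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = my_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	ret = css_driver_register(&my_css_driver);
 */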
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * css_driver_unregister - unregister a css driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * @cdrv: css driver to unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * This is a wrapper around driver_unregister.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) void css_driver_unregister(struct css_driver *cdrv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) driver_unregister(&cdrv->drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) EXPORT_SYMBOL_GPL(css_driver_unregister);