Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The listing below is drivers/s390/cio/chp.c (s390 channel-path management) as of commit 8f3ce5b39 (kx, 2023-10-28).

// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>

#include "cio.h"
#include "css.h"
#include "ioasm.h"
#include "cio_debug.h"
#include "chp.h"

#define to_channelpath(device) container_of(device, struct channel_path, dev)
#define CHP_INFO_UPDATE_INTERVAL	1*HZ

enum cfg_task_t {
	cfg_none,
	cfg_configure,
	cfg_deconfigure
};

/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
static DEFINE_SPINLOCK(cfg_lock);

/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
static DEFINE_MUTEX(info_lock);

/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;

static struct work_struct cfg_work;

/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;

/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
	chpid_to_chp(chpid)->state = onoff;
}

/* On success return 0 if channel-path is varied offline, 1 if it is varied
 * online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}

/**
 * chp_get_sch_opm - return opm for subchannel
 * @sch: subchannel
 *
 * Calculate and return the operational path mask (opm) based on the chpids
 * used by the subchannel and the status of the associated channel-paths.
 */
u8 chp_get_sch_opm(struct subchannel *sch)
{
	struct chp_id chpid;
	int opm;
	int i;

	opm = 0;
	chp_id_init(&chpid);
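	/*
	 * Build the mask MSB-first: after eight iterations the bit for
	 * chpid[0] ends up at 0x80 and the bit for chpid[7] at 0x01. A bit
	 * stays clear only for paths that are registered and varied
	 * logically offline; unknown paths are treated as operational.
	 */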
	for (i = 0; i < 8; i++) {
		opm <<= 1;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (chp_get_status(chpid) != 0)
			opm |= 1;
	}
	return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);

/**
 * chp_is_registered - check if a channel-path is registered
 * @chpid: channel-path ID
 *
 * Return non-zero if a channel-path with the given chpid is registered,
 * zero otherwise.
 */
int chp_is_registered(struct chp_id chpid)
{
	return chpid_to_chp(chpid) != NULL;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int s390_vary_chpid(struct chp_id chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
		chpid.id);
	CIO_TRACE_EVENT(2, dbf_text);

	status = chp_get_status(chpid);
	if (!on && !status)
		return 0;

	set_chp_logically_online(chpid, on);
	chsc_chp_vary(chpid, on);
	return 0;
}

/*
 * Channel measurement related functions
 */
static ssize_t chp_measurement_chars_read(struct file *filp,
					  struct kobject *kobj,
					  struct bin_attribute *bin_attr,
					  char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct device *device;

	device = kobj_to_dev(kobj);
	chp = to_channelpath(device);
	if (chp->cmg == -1)
		return 0;

	return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
				       sizeof(chp->cmg_chars));
}

static const struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

static void chp_measurement_copy_block(struct cmg_entry *buf,
				       struct channel_subsystem *css,
				       struct chp_id chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid.id < 128) {
		area = css->cub_addr1;
		idx = chpid.id;
	} else {
		area = css->cub_addr2;
		idx = chpid.id - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
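	/*
	 * The measurement block may be updated concurrently by the channel
	 * subsystem, so copy the entry and a reference copy repeatedly
	 * until two consecutive reads agree on the first value, i.e. until
	 * a consistent snapshot has been taken.
	 */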
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *bin_attr,
				    char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	struct device *device;
	unsigned int size;

	device = kobj_to_dev(kobj);
	chp = to_channelpath(device);
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
	count = size;
	return count;
}

static const struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

void chp_remove_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

int chp_add_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
	if (ret)
		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t chp_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	int status;

	mutex_lock(&chp->lock);
	status = chp->state;
	mutex_unlock(&chp->lock);

	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}

static ssize_t chp_status_write(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_path *cp = to_channelpath(dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();

	if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 1);
		mutex_unlock(&cp->lock);
	} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 0);
		mutex_unlock(&cp->lock);
	} else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t chp_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct channel_path *cp;
	int status;

	cp = to_channelpath(dev);
	status = chp_info_get_status(cp->chpid);
	if (status < 0)
		return status;

	return snprintf(buf, PAGE_SIZE, "%d\n", status);
}

static int cfg_wait_idle(void);

static ssize_t chp_configure_write(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct channel_path *cp;
	int val;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cp = to_channelpath(dev);
	chp_cfg_schedule(cp->chpid, val);
	cfg_wait_idle();

	return count;
}

static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);

static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	u8 type;

	mutex_lock(&chp->lock);
	type = chp->desc.desc;
	mutex_unlock(&chp->lock);
	return sprintf(buf, "%x\n", type);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t chp_shared_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
	if (chp->desc_fmt1.flags & 0x10)
		rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
	else
		rc = 0;
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);

static ssize_t chp_chid_external_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
	if (chp->desc_fmt1.flags & 0x10)
		rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
	else
		rc = 0;
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);

static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
	ssize_t rc;

	mutex_lock(&chp->lock);
	rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
				     sizeof(chp->desc_fmt3.util_str));
	mutex_unlock(&chp->lock);

	return rc;
}
static BIN_ATTR_RO(util_string,
		   sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));

static struct bin_attribute *chp_bin_attrs[] = {
	&bin_attr_util_string,
	NULL,
};

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_configure.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	&dev_attr_chid.attr,
	&dev_attr_chid_external.attr,
	NULL,
};
static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
	.bin_attrs = chp_bin_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
	&chp_attr_group,
	NULL,
};
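
/*
 * Usage sketch (illustrative; the exact sysfs layout depends on the system):
 * once a channel path is registered, the attributes above typically appear
 * under /sys/devices/css0/chp<cssid>.<id>/, e.g. for the example chpid 0.4a:
 *
 *	echo off > /sys/devices/css0/chp0.4a/status	(vary path offline)
 *	echo 1 > /sys/devices/css0/chp0.4a/configure	(configure path)
 *	cat /sys/devices/css0/chp0.4a/type		(channel-path type)
 */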

static void chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = to_channelpath(dev);
	kfree(cp);
}

/**
 * chp_update_desc - update channel-path description
 * @chp: channel-path
 *
 * Update the channel-path description of the specified channel-path
 * including channel measurement related information.
 * Return zero on success, non-zero otherwise.
 */
int chp_update_desc(struct channel_path *chp)
{
	int rc;

	rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
	if (rc)
		return rc;

	/*
	 * Fetching the following data is optional. Not all machines or
	 * hypervisors implement the required chsc commands.
	 */
	chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
	chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
	chsc_get_channel_measurement_chars(chp);

	return 0;
}

/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_subsystem *css = css_by_id(chpid.cssid);
	struct channel_path *chp;
	int ret = 0;

	mutex_lock(&css->mutex);
	if (chp_is_registered(chpid))
		goto out;

	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp) {
		ret = -ENOMEM;
		goto out;
	}
	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &css->device;
	chp->dev.groups = chp_attr_groups;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chp_update_desc(chp);
	if (ret)
		goto out_free;
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		put_device(&chp->dev);
		goto out;
	}

	if (css->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			device_unregister(&chp->dev);
			goto out;
		}
	}
	css->chps[chpid.id] = chp;
	goto out;
out_free:
	kfree(chp);
out:
	mutex_unlock(&css->mutex);
	return ret;
}

/**
 * chp_get_chp_desc - return newly allocated channel-path description
 * @chpid: channel-path ID
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel-path ID. Return %NULL on error.
 */
struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
{
	struct channel_path *chp;
	struct channel_path_desc_fmt0 *desc;

	chp = chpid_to_chp(chpid);
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(desc, &chp->desc, sizeof(*desc));
	mutex_unlock(&chp->lock);
	return desc;
}

/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handler
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has come. */
	case CRW_ERC_INIT:
		chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has gone. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}

int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
	int i;
	int mask;

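	/*
	 * Path positions are checked MSB-first (position i maps to mask
	 * 0x80 >> i). Skip positions that are not part of the path mask,
	 * that name a different chpid, or whose valid full link address
	 * does not match the link's FLA.
	 */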
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & link->fla_mask) != link->fla))
			continue;
		return mask;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);

static inline int info_bit_num(struct chp_id id)
{
	return id.id + id.cssid * (__MAX_CHPID + 1);
}

/* Force chp_info refresh on next call to info_validate(). */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}

/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
	int rc;

	mutex_lock(&info_lock);
	rc = 0;
	if (time_after(jiffies, chp_info_expires)) {
		/* Data is too old, update. */
		rc = sclp_chp_read_info(&chp_info);
		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
	}
	mutex_unlock(&info_lock);

	return rc;
}

/**
 * chp_info_get_status - retrieve configure status of a channel-path
 * @chpid: channel-path ID
 *
 * On success, return 0 for standby, 1 for configured, 2 for reserved,
 * 3 for not recognized. Return negative error code on error.
 */
int chp_info_get_status(struct chp_id chpid)
{
	int rc;
	int bit;

	rc = info_update();
	if (rc)
		return rc;

	bit = info_bit_num(chpid);
	mutex_lock(&info_lock);
	if (!chp_test_bit(chp_info.recognized, bit))
		rc = CHP_STATUS_NOT_RECOGNIZED;
	else if (chp_test_bit(chp_info.configured, bit))
		rc = CHP_STATUS_CONFIGURED;
	else if (chp_test_bit(chp_info.standby, bit))
		rc = CHP_STATUS_STANDBY;
	else
		rc = CHP_STATUS_RESERVED;
	mutex_unlock(&info_lock);

	return rc;
}

/* Return configure task for chpid. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
	return chp_cfg_task[chpid.cssid][chpid.id];
}

/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}

/* Fetch the first configure task. Set chpid accordingly. */
static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
{
	enum cfg_task_t t = cfg_none;

	chp_id_for_each(chpid) {
		t = cfg_get_task(*chpid);
		if (t != cfg_none)
			break;
	}

	return t;
}

/* Perform one configure/deconfigure request. Reschedule work function until
 * last request. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;
	int rc;

	spin_lock(&cfg_lock);
	t = chp_cfg_fetch_task(&chpid);
	spin_unlock(&cfg_lock);

	switch (t) {
	case cfg_configure:
		rc = sclp_chp_configure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_online(chpid);
		}
		break;
	case cfg_deconfigure:
		rc = sclp_chp_deconfigure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_offline(chpid);
		}
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
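	/*
	 * Clear the task only if it was not replaced by a new request for
	 * this chpid in the meantime, then reschedule the work function to
	 * pick up any remaining tasks.
	 */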
	spin_lock(&cfg_lock);
	if (t == cfg_get_task(chpid))
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}

/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request.
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	spin_lock(&cfg_lock);
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}

/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	spin_lock(&cfg_lock);
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
}

static bool cfg_idle(void)
{
	struct chp_id chpid;
	enum cfg_task_t t;

	spin_lock(&cfg_lock);
	t = chp_cfg_fetch_task(&chpid);
	spin_unlock(&cfg_lock);

	return t == cfg_none;
}

static int cfg_wait_idle(void)
{
	if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
		return -ERESTARTSYS;
	return 0;
}

static int __init chp_init(void)
{
	struct chp_id chpid;
	int state, ret;

	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
	if (ret)
		return ret;
	INIT_WORK(&cfg_work, cfg_func);
	init_waitqueue_head(&cfg_wait_queue);
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		state = chp_info_get_status(chpid);
		if (state == CHP_STATUS_CONFIGURED ||
		    state == CHP_STATUS_STANDBY)
			chp_new(chpid);
	}

	return 0;
}

subsys_initcall(chp_init);