Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

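drivers/s390/cio/device_pgid.c, shown at blame commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300):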
// SPDX-License-Identifier: GPL-2.0
/*
 *  CCW device PGID and path verification I/O handling.
 *
 *    Copyright IBM Corp. 2002, 2009
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

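/* Retry limit and timeout applied to all PGID-related channel programs below. */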
#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
			 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
			 sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp->cmd_code	= CCW_CMD_NOOP;
	cp->cda		= 0;
	cp->count	= 0;
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);
	struct pgid *pgid = &cdev->private->dma_area->pgid[i];

	pgid->inf.fc	= fn;
	cp->cmd_code	= CCW_CMD_SET_PGID;
	cp->cda		= (u32) (addr_t) pgid;
	cp->count	= sizeof(*pgid);
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

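/*
 * Process the result of the path-group wipeout (disband SET PGID) and
 * restart path verification once the path groups have been reset.
 */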
static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam;
	req->callback	= pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

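/*
 * Start SET PGID operations, beginning with the first path.
 */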
static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= 0x80;
	req->singlepath	= 1;
	req->callback	= spid_callback;
	spid_do(cdev);
}

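/*
 * Check whether a PGID is in the reset state: all bytes except the first
 * (the function control byte) are zero.
 */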
static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

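/*
 * Compare two PGIDs, ignoring the first (function control) byte.
 */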
static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->dma_area->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

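/*
 * Return the mask of paths whose sensed PGID state already matches the
 * target grouped/ungrouped and single-/multi-path settings.
 */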
static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->dma_area->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

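/*
 * Copy the given PGID into the PGID slots of all eight paths.
 */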
static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->dma_area->pgid[i], pgid,
		       sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;
	int i = pathmask_to_pos(req->lpm);

	/* Channel program setup. */
	cp->cmd_code	= CCW_CMD_SENSE_PGID;
	cp->cda		= (u32) (addr_t) &cdev->private->dma_area->pgid[i];
	cp->count	= sizeof(struct pgid);
	cp->flags	= CCW_FLAG_SLI;
	req->cp		= cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->dma_area->pgid, 0,
	       sizeof(cdev->private->dma_area->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= 0x80;
	req->singlepath	= 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback	= snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter	= nop_filter;
		req->callback	= nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->singlepath	= 1;
	req->callback	= disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

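/* Completion data for a steal lock (unconditional reserve + release). */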
struct stlck_data {
	struct completion done;
	int rc;
};

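/*
 * Create channel program to perform an unconditional reserve (steal lock)
 * chained to a release.
 */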
static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->dma_area->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

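/*
 * Process steal lock request result and signal completion to the waiter.
 */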
static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 */
static void ccw_device_stlck_start(struct ccw_device *cdev, void *data,
				   void *buf1, void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout	= PGID_TIMEOUT;
	req->maxretries	= PGID_RETRIES;
	req->lpm	= sch->schib.pmcw.pam & sch->opm;
	req->data	= data;
	req->callback	= stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}