Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards.
The listing below is the file drivers/s390/char/vmur.c (the z/VM virtual unit record device driver), shown with its git blame annotations.

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Linux driver for System z and s390 unit record devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * (z/VM virtual punch, reader, printer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright IBM Corp. 2001, 2009
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *	    Michael Holzheu <holzheu@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *	    Frank Munzert <munzert@de.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #define KMSG_COMPONENT "vmur"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/cdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <asm/cio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <asm/ccwdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <asm/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <asm/diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "vmur.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * Driver overview
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * Unit record device support is implemented as a character device driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  * We can fit at least 16 bits into a device minor number and use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * simple method of mapping a character device number with minor abcd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  * to the unit record device with devno abcd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  * I/O to virtual unit record devices is handled as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  * Reads: Diagnose code 0x14 (input spool file manipulation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  * is used to read spool data page-wise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  * is available by reading the sysfs attribute reclen. Each write() to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * device must be an integral multiple of reclen; at most 511 records per write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  */
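The write contract described above can be exercised from userspace roughly as follows. This is a minimal illustrative sketch, not part of the driver: the bus ID 0.0.000d and the /dev/vmpun-0.0.000d node name are assumptions for an online z/VM punch device, and the record payload is arbitrary. The write() length must be a multiple of reclen, and at most 511 records are transferred per call.

/* Hypothetical userspace writer (sketch only, not part of vmur.c) */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t reclen;
	FILE *attr;
	char *rec;
	int fd;

	/* reclen is exported per device by ur_create_attributes() below */
	attr = fopen("/sys/bus/ccw/devices/0.0.000d/reclen", "r");
	if (!attr)
		return 1;
	if (fscanf(attr, "%zu", &reclen) != 1) {
		fclose(attr);
		return 1;
	}
	fclose(attr);

	/* one record, padded with blanks to exactly reclen bytes
	 * (reclen is 80 for punch/reader devices, see ur_ids[] below) */
	rec = malloc(reclen);
	if (!rec)
		return 1;
	memset(rec, ' ', reclen);
	memcpy(rec, "HELLO", 5);

	fd = open("/dev/vmpun-0.0.000d", O_WRONLY);	/* node name is an assumption */
	if (fd < 0)
		return 1;
	if (write(fd, rec, reclen) != (ssize_t) reclen)
		perror("write");
	close(fd);
	free(rec);
	return 0;
}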
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) static char ur_banner[] = "z/VM virtual unit record device driver";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) MODULE_AUTHOR("IBM Corporation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) static dev_t ur_first_dev_maj_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) static struct class *vmur_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) static struct debug_info *vmur_dbf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) /* We put the device's record length (for writes) in the driver_info field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) static struct ccw_device_id ur_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	{ /* end of list */ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) MODULE_DEVICE_TABLE(ccw, ur_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) static int ur_probe(struct ccw_device *cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static void ur_remove(struct ccw_device *cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) static int ur_set_online(struct ccw_device *cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) static int ur_set_offline(struct ccw_device *cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) static int ur_pm_suspend(struct ccw_device *cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) static struct ccw_driver ur_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 		.name	= "vmur",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 		.owner	= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	.ids		= ur_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 	.probe		= ur_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	.remove		= ur_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 	.set_online	= ur_set_online,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 	.set_offline	= ur_set_offline,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 	.freeze		= ur_pm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 	.int_class	= IRQIO_VMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static DEFINE_MUTEX(vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84)  * Allocation, freeing, getting and putting of urdev structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * Each ur device (urd) contains a reference to its corresponding ccw device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  * ur device using dev_get_drvdata(&cdev->dev) pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90)  * urd references:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91)  * - ur_probe gets a urd reference, ur_remove drops the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92)  *   (dev_get_drvdata(&cdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93)  * - ur_open gets a urd reference, ur_release drops the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94)  *   (urf->urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96)  * cdev references:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97)  * - urdev_alloc gets a cdev reference (urd->cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98)  * - urdev_free drops the cdev reference (urd->cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100)  * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the ccwdev lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) static struct urdev *urdev_alloc(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 	if (!urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	urd->reclen = cdev->id.driver_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	ccw_device_get_id(cdev, &urd->dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	mutex_init(&urd->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 	init_waitqueue_head(&urd->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	spin_lock_init(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 	refcount_set(&urd->ref_count,  1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	urd->cdev = cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 	get_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	return urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) static void urdev_free(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	TRACE("urdev_free: %p\n", urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 	if (urd->cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 		put_device(&urd->cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 	kfree(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) static void urdev_get(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	refcount_inc(&urd->ref_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 	urd = dev_get_drvdata(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	if (urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 		urdev_get(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 	return urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) static struct urdev *urdev_get_from_devno(u16 devno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 	char bus_id[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	struct ccw_device *cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	sprintf(bus_id, "0.0.%04x", devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	if (!cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	urd = urdev_get_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	put_device(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 	return urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) static void urdev_put(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	if (refcount_dec_and_test(&urd->ref_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 		urdev_free(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) }
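A minimal sketch of how these helpers pair up from a caller's point of view, following the reference rules in the comment above. The function below is hypothetical and for illustration only; the real callers are ur_open()/ur_release() and ur_probe()/ur_remove() later in this file.

/* Hypothetical caller (illustration only) */
static int example_use_urdev(u16 devno)
{
	struct urdev *urd;

	urd = urdev_get_from_devno(devno);	/* takes a urd reference */
	if (!urd)
		return -ENXIO;

	/* ... use urd->reclen, urd->class, issue I/O on urd->cdev ... */

	urdev_put(urd);				/* drops it; frees urd with the last reference */
	return 0;
}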
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168)  * State and contents of ur devices can be changed by class D users issuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169)  * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170)  * Also the Linux guest might be logged off, which causes all active spool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  * files to be closed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172)  * So we cannot guarantee that spool files are still the same when the Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * guest is resumed. In order to avoid unpredictable results at resume time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * we simply refuse to suspend if a ur device node is open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) static int ur_pm_suspend(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	struct urdev *urd = dev_get_drvdata(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	if (urd->open_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		pr_err("Unit record device %s is busy, %s refusing to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)  * Low-level functions to do I/O to a ur device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191)  *     alloc_chan_prog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192)  *     free_chan_prog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193)  *     do_ur_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194)  *     ur_int_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  * alloc_chan_prog allocates and builds the channel program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197)  * free_chan_prog frees memory of the channel program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199)  * do_ur_io issues the channel program to the device and blocks waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200)  * on a completion event it publishes at urd->io_done. The function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201)  * serialises itself on the device's mutex so that only one I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202)  * is issued at a time (and that I/O is synchronous).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204)  * ur_int_handler catches the "I/O done" interrupt, translates the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205)  * status into an error code stored in urd->io_request_rc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206)  * and complete()s the io_done to wake the waiting do_ur_io.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208)  * The caller of do_ur_io is responsible for freeing the channel program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209)  * returned by alloc_chan_prog, using free_chan_prog().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210)  */
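Condensed into a call sequence, the synchronous write path described above looks like this; it is a sketch of what do_write() further down in this file actually does, with error handling trimmed.

/*
 * Sketch of one synchronous write:
 *
 *	cpa = alloc_chan_prog(udata, count / reclen, reclen);
 *	rc = do_ur_io(urd, cpa);		blocks on urd->io_done
 *	if (!rc)
 *		rc = urd->io_request_rc;	set by ur_int_handler
 *	free_chan_prog(cpa);			caller frees CCWs + data buffers
 */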
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) static void free_chan_prog(struct ccw1 *cpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	struct ccw1 *ptr = cpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	while (ptr->cda) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 		kfree((void *)(addr_t) ptr->cda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 		ptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	kfree(cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * alloc_chan_prog
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  * The channel program we build consists of write CCWs chained together,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * with a final NOP CCW command-chained on the end. The NOP ensures that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * CE and DE are presented together in a single interrupt rather than as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  * separate interrupts, unless an incorrect length indication kicks in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  * first. The data length in each write CCW is reclen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  */
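For instance (a sketch of the resulting chain, matching the loop below), a write() of 3 * reclen bytes produces four CCWs:

/*
 *	cpa[0]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> record buffer 0
 *	cpa[1]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> record buffer 1
 *	cpa[2]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> record buffer 2
 *	cpa[3]: CCW_CMD_NOOP (ends the chain; CE and DE arrive in one interrupt)
 */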
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 				    int reclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	struct ccw1 *cpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	void *kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	 * We chain a NOP onto the writes to force CE+DE together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	 * That means we allocate room for CCWs to cover count/reclen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 	 * records plus a NOP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		      GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	if (!cpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	for (i = 0; i < rec_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 		cpa[i].cmd_code = WRITE_CCW_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 		cpa[i].count = reclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 		if (!kbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 			free_chan_prog(cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 		cpa[i].cda = (u32)(addr_t) kbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 		if (copy_from_user(kbuf, ubuf, reclen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 			free_chan_prog(cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 			return ERR_PTR(-EFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		ubuf += reclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	/* The following NOP CCW forces CE+DE to be presented together */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	cpa[i].cmd_code = CCW_CMD_NOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	return cpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	struct ccw_device *cdev = urd->cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	DECLARE_COMPLETION_ONSTACK(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	TRACE("do_ur_io: cpa=%p\n", cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	rc = mutex_lock_interruptible(&urd->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	urd->io_done = &event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	spin_lock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	spin_unlock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	wait_for_completion(&event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	TRACE("do_ur_io: I/O complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	mutex_unlock(&urd->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303)  * ur interrupt handler, called from the ccw_device layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 			   struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	if (!IS_ERR(irb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		      irb->scsw.cmd.count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	if (!intparm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 		TRACE("ur_int_handler: unsolicited interrupt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	urd = dev_get_drvdata(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	BUG_ON(!urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	/* On special conditions irb is an error pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	if (IS_ERR(irb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 		urd->io_request_rc = PTR_ERR(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		urd->io_request_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 		urd->io_request_rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	complete(urd->io_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  * reclen sysfs attribute - The record length to be used for write CCWs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) static ssize_t ur_attr_reclen_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 				   struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 	urd = urdev_get_from_cdev(to_ccwdev(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	if (!urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	rc = sprintf(buf, "%zu\n", urd->reclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	urdev_put(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) static int ur_create_attributes(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	return device_create_file(dev, &dev_attr_reclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) static void ur_remove_attributes(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	device_remove_file(dev, &dev_attr_reclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * diagnose code 0x210 - retrieve device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * cc=0  normal completion, we have a real device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  * cc=1  CP paging error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365)  * cc=2  The virtual device exists, but is not associated with a real device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366)  * cc=3  Invalid device address, or the virtual device does not exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) static int get_urd_class(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	static struct diag210 ur_diag210;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	int cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	ur_diag210.vrdcdvno = urd->dev_id.devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	ur_diag210.vrdclen = sizeof(struct diag210);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	cc = diag210(&ur_diag210);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	switch (cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		return ur_diag210.vrdcvcla; /* virtual device class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390)  * Allocation and freeing of urfile structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) static struct urfile *urfile_alloc(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	struct urfile *urf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	if (!urf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	urf->urd = urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	      urf->dev_reclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	return urf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) static void urfile_free(struct urfile *urf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	kfree(urf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414)  * The fops implementation of the character device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) static ssize_t do_write(struct urdev *urd, const char __user *udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 			size_t count, size_t reclen, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	struct ccw1 *cpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 	cpa = alloc_chan_prog(udata, count / reclen, reclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	if (IS_ERR(cpa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 		return PTR_ERR(cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	rc = do_ur_io(urd, cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		goto fail_kfree_cpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	if (urd->io_request_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		rc = urd->io_request_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		goto fail_kfree_cpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	*ppos += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	rc = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) fail_kfree_cpa:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	free_chan_prog(cpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) static ssize_t ur_write(struct file *file, const char __user *udata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 			size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct urfile *urf = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	TRACE("ur_write: count=%zu\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	if (count % urf->dev_reclen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		return -EINVAL;	/* count must be a multiple of reclen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 		count = urf->dev_reclen * MAX_RECS_PER_IO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462)  * diagnose code 0x14 subcode 0x0028 - position spool file to designated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463)  *				       record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464)  * cc=0  normal completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465)  * cc=2  no file active on the virtual reader or device not ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466)  * cc=3  record specified is beyond EOF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) static int diag_position_to_record(int devno, int record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	int cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	cc = diag14(record, devno, 0x28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	switch (cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		return -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		return -ENODATA; /* position beyond end of file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486)  * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487)  * cc=0  normal completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488)  * cc=1  EOF reached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489)  * cc=2  no file active on the virtual reader, and no file eligible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490)  * cc=3  file already active on the virtual reader or specified virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491)  *	 reader does not exist or is not a reader
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) static int diag_read_file(int devno, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	int cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	cc = diag14((unsigned long) buf, devno, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	switch (cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		return -ENOMEDIUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 			   loff_t *offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	size_t len, copied, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	u16 reclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	urd = ((struct urfile *) file->private_data)->urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	reclen = ((struct urfile *) file->private_data)->file_reclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	if (rc == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	len = min((size_t) PAGE_SIZE, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	res = (size_t) (*offs % PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		rc = diag_read_file(urd->dev_id.devno, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		if (rc == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		len = min(count - copied, PAGE_SIZE - res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		if (copy_to_user(ubuf + copied, buf + res, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		copied += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	} while (copied != count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	*offs += copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	rc = copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	free_page((unsigned long) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		       loff_t *offs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	if (count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	urd = ((struct urfile *) file->private_data)->urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	rc = mutex_lock_interruptible(&urd->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	rc = diag14_read(file, ubuf, count, offs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	mutex_unlock(&urd->io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581)  * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582)  * cc=0  normal completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583)  * cc=1  no files on reader queue or no subsequent file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584)  * cc=2  spid specified is invalid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) static int diag_read_next_file_info(struct file_control_block *buf, int spid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	int cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	cc = diag14((unsigned long) buf, spid, 0xfff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	switch (cc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) static int verify_uri_device(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	struct file_control_block *fcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (!fcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	/* check for empty reader device (beginning of chain) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	rc = diag_read_next_file_info(fcb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		goto fail_free_fcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	/* if file is in hold status, we do not read it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		rc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		goto fail_free_fcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	/* open file on virtual reader	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		goto fail_free_fcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	rc = diag_read_file(urd->dev_id.devno, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		goto fail_free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	/* check if the file on top of the queue is open now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	rc = diag_read_next_file_info(fcb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		goto fail_free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	if (!(fcb->file_stat & FLG_IN_USE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		rc = -EMFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		goto fail_free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) fail_free_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	free_page((unsigned long) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) fail_free_fcb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	kfree(fcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static int verify_device(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	switch (urd->class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	case DEV_CLASS_UR_O:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		return 0; /* no check needed here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	case DEV_CLASS_UR_I:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		return verify_uri_device(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) static int get_uri_file_reclen(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	struct file_control_block *fcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	if (!fcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	rc = diag_read_next_file_info(fcb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		goto fail_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	if (fcb->file_stat & FLG_CP_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		rc = fcb->rec_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) fail_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	kfree(fcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) static int get_file_reclen(struct urdev *urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	switch (urd->class) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	case DEV_CLASS_UR_O:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	case DEV_CLASS_UR_I:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		return get_uri_file_reclen(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) static int ur_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	u16 devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	struct urfile *urf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	unsigned short accmode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	accmode = file->f_flags & O_ACCMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	if (accmode == O_RDWR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 * We treat the minor number as the devno of the ur device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	 * to find in the driver tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	devno = MINOR(file_inode(file)->i_rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	urd = urdev_get_from_devno(devno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	if (!urd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	spin_lock(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	while (urd->open_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		spin_unlock(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		if (file->f_flags & O_NONBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			goto fail_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			rc = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			goto fail_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		spin_lock(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	urd->open_flag++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	spin_unlock(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	TRACE("ur_open\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		rc = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	rc = verify_device(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	urf = urfile_alloc(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	if (!urf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	urf->dev_reclen = urd->reclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	rc = get_file_reclen(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		goto fail_urfile_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	urf->file_reclen = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	file->private_data = urf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) fail_urfile_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	urfile_free(urf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	spin_lock(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	urd->open_flag--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	spin_unlock(&urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) fail_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	urdev_put(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) static int ur_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	struct urfile *urf = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	TRACE("ur_release\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	spin_lock(&urf->urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	urf->urd->open_flag--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	spin_unlock(&urf->urd->open_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	wake_up_interruptible(&urf->urd->wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	urdev_put(urf->urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	urfile_free(urf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		return -ESPIPE; /* seek allowed only for reader */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (offset % PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		return -ESPIPE; /* only multiples of 4K allowed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	return no_seek_end_llseek(file, offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) static const struct file_operations ur_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	.owner	 = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	.open	 = ur_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	.release = ur_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	.read	 = ur_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	.write	 = ur_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	.llseek  = ur_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) };
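For completeness, the read side of these fops can be driven from userspace along the following lines. This is a minimal sketch under assumptions: the bus ID 0.0.000c and the /dev/vmrdr-0.0.000c node name are examples for an online reader, and seeks are restricted to 4 KB multiples as ur_llseek() above enforces.

/* Hypothetical userspace reader (sketch only, not part of vmur.c) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/dev/vmrdr-0.0.000c", O_RDONLY);	/* node name is an assumption */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* optional: skip the first spool page; only 4K multiples are accepted */
	if (lseek(fd, 4096, SEEK_SET) < 0)
		perror("lseek");

	/* data arrives in page-sized chunks until end of the spool file */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}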
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  * ccw_device infrastructure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  *     ur_probe creates the struct urdev (with refcount = 1), the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  *     attributes, sets up the interrupt handler and validates the virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  *     unit record device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  *     ur_remove removes the device attributes and drops the reference to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  *     struct urdev.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  *     by the vmur_mutex lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  *     urd->char_device is used as an indication that the online function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  *     has completed successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static int ur_probe(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	TRACE("ur_probe: cdev=%p\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	mutex_lock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	urd = urdev_alloc(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (!urd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	rc = ur_create_attributes(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		goto fail_urdev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	cdev->handler = ur_int_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	/* validate virtual unit record device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	urd->class = get_urd_class(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (urd->class < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		rc = urd->class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		goto fail_remove_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		goto fail_remove_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	}
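	/*
	 * Publish urd via drvdata under the ccw device lock so that code
	 * running with that lock held (e.g. the interrupt handler installed
	 * above) sees either a fully set up urdev or NULL; ur_remove()
	 * clears it under the same lock.
	 */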
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	spin_lock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	dev_set_drvdata(&cdev->dev, urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	spin_unlock_irq(get_ccwdev_lock(cdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	mutex_unlock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) fail_remove_attr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	ur_remove_attributes(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) fail_urdev_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	urdev_put(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	mutex_unlock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
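/*
 * ur_set_online allocates and registers the character device for this unit
 * record device (minor taken from the devno, major from the region allocated
 * in ur_init) and creates the class device node vmrdr-/vmpun-/vmprt-<bus id>,
 * depending on the device class and control unit type.
 */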
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) static int ur_set_online(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	int minor, major, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	char node_id[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	TRACE("ur_set_online: cdev=%p\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	mutex_lock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	urd = urdev_get_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (!urd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		/* ur_remove already deleted our urd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (urd->char_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		/* Another ur_set_online was faster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		goto fail_urdev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	minor = urd->dev_id.devno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	major = MAJOR(ur_first_dev_maj_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	urd->char_device = cdev_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (!urd->char_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		goto fail_urdev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	urd->char_device->ops = &ur_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	urd->char_device->owner = ur_fops.owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		goto fail_free_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		if (urd->class == DEV_CLASS_UR_I)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		if (urd->class == DEV_CLASS_UR_O)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		goto fail_free_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	urd->device = device_create(vmur_class, &cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 				    urd->char_device->dev, NULL, "%s", node_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (IS_ERR(urd->device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		rc = PTR_ERR(urd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		TRACE("ur_set_online: device_create rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		goto fail_free_cdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	urdev_put(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	mutex_unlock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) fail_free_cdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	cdev_del(urd->char_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	urd->char_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) fail_urdev_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	urdev_put(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	mutex_unlock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
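/*
 * ur_set_offline_force tears down what ur_set_online set up: the class
 * device node and the character device.  Unless forced (as from ur_remove),
 * it refuses with -EBUSY while the urdev reference count is above 2, i.e.
 * while someone besides the probe-time reference kept in drvdata and the
 * reference taken here still holds the device (e.g. an open file).
 */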
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static int ur_set_offline_force(struct ccw_device *cdev, int force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct urdev *urd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	TRACE("ur_set_offline: cdev=%p\n", cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	urd = urdev_get_from_cdev(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (!urd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		/* ur_remove already deleted our urd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (!urd->char_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		/* Another ur_set_offline was faster */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		goto fail_urdev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (!force && (refcount_read(&urd->ref_count) > 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		/* There is still a user of urd (e.g. ur_open) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		TRACE("ur_set_offline: BUSY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		goto fail_urdev_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	device_destroy(vmur_class, urd->char_device->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	cdev_del(urd->char_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	urd->char_device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) fail_urdev_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	urdev_put(urd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) static int ur_set_offline(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	mutex_lock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	rc = ur_set_offline_force(cdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	mutex_unlock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
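/*
 * ur_remove forces the device offline if it is still online, removes the
 * sysfs attributes and drops the probe-time urdev reference stored in
 * drvdata, clearing drvdata under the ccw device lock.
 */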
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) static void ur_remove(struct ccw_device *cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	TRACE("ur_remove\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	mutex_lock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (cdev->online)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		ur_set_offline_force(cdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	ur_remove_attributes(&cdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	urdev_put(dev_get_drvdata(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	dev_set_drvdata(&cdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	mutex_unlock(&vmur_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * Module initialisation and cleanup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  */
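/*
 * ur_init sets up, in order: the s390 debug facility entry, the vmur device
 * class, the ccw driver and the character device region.  The fail_* labels
 * undo these steps in reverse order.
 */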
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static int __init ur_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	dev_t dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (!MACHINE_IS_VM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		pr_err("The %s cannot be loaded without z/VM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		       ur_banner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	if (!vmur_dbf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		goto fail_free_dbf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	debug_set_level(vmur_dbf, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	vmur_class = class_create(THIS_MODULE, "vmur");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (IS_ERR(vmur_class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		rc = PTR_ERR(vmur_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		goto fail_free_dbf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	rc = ccw_driver_register(&ur_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		goto fail_class_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		pr_err("Kernel function alloc_chrdev_region failed with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		       "error code %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		goto fail_unregister_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	pr_info("%s loaded.\n", ur_banner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) fail_unregister_driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	ccw_driver_unregister(&ur_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) fail_class_destroy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	class_destroy(vmur_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) fail_free_dbf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	debug_unregister(vmur_dbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static void __exit ur_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	ccw_driver_unregister(&ur_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	class_destroy(vmur_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	debug_unregister(vmur_dbf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	pr_info("%s unloaded.\n", ur_banner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) module_init(ur_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) module_exit(ur_exit);