Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 *	Character device driver for reading z/VM system service records,
 *	Version 1.0
 *
 *	Copyright IBM Corp. 2004, 2009
 *
 *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *		   Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>

MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 "                            Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
		   "system service records.");
MODULE_LICENSE("GPL");


/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))

/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char *buffer;
	char *current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened */
	spinlock_t priv_lock;
	struct device *device;
	struct device *class_device;
	int autorecording;
	int autopurge;
};


/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t *ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
	.llseek  = no_llseek,
};


static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);


static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};


static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */

static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR  ARRAY_SIZE(sys_ser)

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;


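/*
 * IUCV callback: the connection to the system service has been
 * established. Flag it under priv_lock and wake up the task waiting
 * in vmlogrdr_open().
 */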
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t *logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}


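/*
 * IUCV callback: the peer severed the path. Release the path, record
 * the severance under priv_lock and wake up both wait queues.
 */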
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct vmlogrdr_priv_t *logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}


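/*
 * IUCV callback: a message (record) is pending on the path. Stash the
 * message descriptor, bump receive_ready and wake up any reader.
 */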
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t *logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}


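/*
 * Ask CP ("QUERY COMMAND RECORDING") whether this guest may use the
 * RECORDING command with one of the privilege classes A or B.
 * Returns 1 if so, 0 otherwise.
 */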
static int vmlogrdr_get_recording_class_AB(void)
{
	static const char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len, i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response, sizeof(cp_response));
	/* now the parsing */
	tail = strnchr(cp_response, len, '=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY", tail, 3))
		return 1;
	if (!strncmp("NONE", tail, 4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i = tail - cp_response; i < len; i++)
		if (cp_response[i] == 'A' || cp_response[i] == 'B')
			return 1;
	return 0;
}


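/*
 * Switch CP recording for this service on (action == 1) or off
 * (action == 0) via the RECORDING command, optionally purging queued
 * records first (when switching on) or afterwards (when switching off).
 */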
static int vmlogrdr_recording(struct vmlogrdr_priv_t *logptr,
			      int action, int purge)
{

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		logptr->recording_name,
		onoff,
		qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So we use strstr rather than strncmp.
	 */
	if (strstr(cp_response, "Command complete"))
		rc = 0;
	else
		rc = -EIO;
	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	return rc;
}


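/*
 * Open one of the vmlogrdr devices: mark it busy, optionally start
 * recording, connect an IUCV path to the system service and wait until
 * the connection is either established or severed.
 */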
static int vmlogrdr_open(struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t *logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow the device to be opened for blocking reads
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -EOPNOTSUPP;

	/* Be sure this device hasn't already been opened */
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use) {
		spin_unlock_bh(&logptr->priv_lock);
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;

	/* start recording for this service */
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr, 1, logptr->autopurge);
		if (ret)
			pr_warn("vmlogrdr: failed to start recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i\n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	nonseekable_open(inode, filp);
	return 0;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr, 0, logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	return -EIO;
}


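/*
 * Close the device: sever and free the IUCV path, optionally stop
 * recording, and mark the device as no longer in use.
 */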
static int vmlogrdr_release(struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t *logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr, 0, logptr->autopurge);
		if (ret)
			pr_warn("vmlogrdr: failed to stop recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}


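/*
 * Fetch the next chunk of record data from IUCV into priv->buffer.
 * A new record is prefixed with its total length (record data + FENCE)
 * in the first 4 bytes of the buffer; once the whole record has been
 * received, the FENCE string is appended. Returns 0 when data was
 * placed into the buffer, non-zero otherwise.
 */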
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char *buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length) {
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int *)priv->buffer;
			*temp = iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0) {
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}


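/*
 * read() for the vmlogrdr devices: block until a chunk of record data is
 * in the buffer, then copy up to the end of the current chunk to user
 * space.
 */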
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t *ppos)
{
	int rc;
	struct vmlogrdr_priv_t *priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}

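/* sysfs "autopurge" attribute: '1' enables, '0' disables automatic purge */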
static ssize_t vmlogrdr_autopurge_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge = 0;
		break;
	case '1':
		priv->autopurge = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);


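/*
 * sysfs "purge" attribute: writing '1' issues a "RECORDING ... PURGE"
 * CP command for this service.
 */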
static ssize_t vmlogrdr_purge_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */

	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);


static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording = 0;
		break;
	case '1':
		priv->autorecording = 1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);


static ssize_t vmlogrdr_recording_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv, 0, 0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv, 1, 0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;

}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);


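/*
 * Driver attribute "recording_status": returns the raw response of the
 * "QUERY RECORDING" CP command.
 */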
static ssize_t recording_status_show(struct device_driver *driver, char *buf)
{
	static const char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}
static DRIVER_ATTR_RO(recording_status);
static struct attribute *vmlogrdr_drv_attrs[] = {
	&driver_attr_recording_status.attr,
	NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
	.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
	&vmlogrdr_drv_attr_group,
	NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
	&vmlogrdr_attr_group,
	NULL,
};

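/* Refuse to suspend while one of the vmlogrdr devices is open. */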
static int vmlogrdr_pm_prepare(struct device *dev)
{
	int rc;
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	rc = 0;
	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}


static const struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};

static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
	.groups = vmlogrdr_drv_attr_groups,
};

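/*
 * Register the IUCV handler, the driver on the iucv bus and the
 * "vmlogrdr" device class, unwinding in reverse order on failure.
 */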
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_driver;
	}
	return 0;

out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}


static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}


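/*
 * Allocate and register the struct device on the iucv bus for one system
 * service and create the corresponding class device for its char device
 * node.
 */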
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (dev) {
		dev_set_name(dev, "%s", priv->internal_name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->driver = &vmlogrdr_driver;
		dev->groups = vmlogrdr_attr_groups;
		dev_set_drvdata(dev, priv);
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
	} else
		return -ENOMEM;
	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}

	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device = NULL;
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		device_unregister(priv->device);
		priv->device = NULL;
	}
	return 0;
}


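/*
 * Allocate the character device covering all minors and add it to the
 * system. On failure the half-initialized cdev is dropped via
 * kobject_put().
 */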
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev)
		return -ENOMEM;
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
	if (!rc)
		return 0;

	/* cleanup: cdev is not fully registered, no cdev_del here! */
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev = NULL;
	return rc;
}


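/*
 * Common teardown used both on module exit and on a failed init: remove
 * the cdev, unregister the per-service devices, free their buffers and
 * release the driver, class and chrdev region.
 */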
static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev = NULL;
	}
	for (i = 0; i < MAXMINOR; ++i) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major = 0;
	}
}


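/*
 * Module init: only load when running under z/VM, allocate the chrdev
 * region, register driver and per-service devices, allocate one DMA page
 * per service and finally add the character device.
 */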
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc = vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i = 0; i < MAXMINOR; ++i) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc = vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}


static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);