^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * z/VM IUCV hypervisor console (HVC) device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * This HVC device driver provides terminal access using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * z/VM IUCV communication paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright IBM Corp. 2008, 2013
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #define KMSG_COMPONENT "hvc_iucv"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/ebcdic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/mempool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/tty.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <net/iucv/iucv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "hvc_console.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /* General device driver settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define HVC_IUCV_MAGIC 0xc9e4c3e5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /* IUCV TTY message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define MSG_VERSION 0x02 /* Message version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define MSG_TYPE_ERROR 0x01 /* Error message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define MSG_TYPE_DATA 0x10 /* Terminal data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
/*
 * On-wire message exchanged over the IUCV path: a fixed header (version,
 * type, payload length) followed by a flexible-length payload. Packed
 * because it maps a protocol-defined byte layout.
 */
struct iucv_tty_msg {
	u8 version; /* Message version */
	u8 type; /* Message type */
#define MSG_MAX_DATALEN ((u16)(~0)) /* datalen is u16 -> max 65535 bytes */
	u16 datalen; /* Payload length */
	u8 data[]; /* Payload buffer (flexible array member) */
} __attribute__((packed));
/* Total wire size of a message carrying s payload bytes */
#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/* State of the IUCV communication path for one terminal */
enum iucv_state_t {
	IUCV_DISCONN = 0,	/* no IUCV path established */
	IUCV_CONNECTED = 1,	/* IUCV path up and usable */
	IUCV_SEVERED = 2,	/* IUCV path has been severed */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/* State of the HVC terminal (tty) side of a line */
enum tty_state_t {
	TTY_CLOSED = 0,	/* terminal is closed */
	TTY_OPENED = 1,	/* terminal is opened */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
/*
 * Per-terminal state: one instance per HVC/IUCV line, referenced from
 * hvc_iucv_table[]. Mutable members are protected by @lock (see the
 * locking notes on the individual access functions).
 */
struct hvc_iucv_private {
	struct hvc_struct *hvc; /* HVC struct reference */
	u8 srv_name[8]; /* IUCV service name (ebcdic) */
	unsigned char is_console; /* Linux console usage flag */
	enum iucv_state_t iucv_state; /* IUCV connection status */
	enum tty_state_t tty_state; /* TTY status */
	struct iucv_path *path; /* IUCV path pointer */
	spinlock_t lock; /* hvc_iucv_private lock */
#define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */
	void *sndbuf; /* send buffer */
	size_t sndbuf_len; /* length of send buffer */
#define QUEUE_SNDBUF_DELAY (HZ / 25) /* delay before flushing sndbuf */
	struct delayed_work sndbuf_work; /* work: send iucv msg(s) */
	wait_queue_head_t sndbuf_waitq; /* wait for send completion */
	struct list_head tty_outqueue; /* outgoing IUCV messages */
	struct list_head tty_inqueue; /* incoming IUCV messages */
	struct device *dev; /* device structure */
	u8 info_path[16]; /* IUCV path info (dev attr) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
/*
 * Queue element for tty_inqueue/tty_outqueue. @mbuf may be NULL for a
 * pending inbound message whose data has not been received yet (see
 * hvc_iucv_write()); allocated from hvc_iucv_mempool.
 */
struct iucv_tty_buffer {
	struct list_head list; /* list pointer */
	struct iucv_message msg; /* store an IUCV message */
	size_t offset; /* data buffer offset */
	struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) /* IUCV callback handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /* Kernel module parameter: use one terminal device as default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) static unsigned long hvc_iucv_devices = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* Array of allocated hvc iucv tty lines... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define IUCV_HVC_CON_IDX (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) /* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define MAX_VMID_FILTER (500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define FILTER_WILDCARD_CHAR '*'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static size_t hvc_iucv_filter_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static void *hvc_iucv_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static const char *hvc_iucv_filter_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) static DEFINE_RWLOCK(hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* Kmem cache and mempool for iucv_tty_buffer elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) static struct kmem_cache *hvc_iucv_buffer_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) static mempool_t *hvc_iucv_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
/* IUCV handler callback functions — registered with the IUCV core to be
 * invoked on path and message events for this service */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending = hvc_iucv_path_pending,
	.path_severed = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending = hvc_iucv_msg_pending,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * @num: The HVC virtual terminal number (vtermno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * This function returns the struct hvc_iucv_private instance that corresponds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * to the HVC virtual terminal number specified as parameter @num.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return hvc_iucv_table[num - HVC_IUCV_MAGIC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * @size: Size of the internal buffer used to store data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * @flags: Memory allocation flags passed to mempool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * This function allocates a new struct iucv_tty_buffer element and, optionally,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * allocates an internal data buffer with the specified size @size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * The internal data buffer is always allocated with GFP_DMA which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * required for receiving and sending data with IUCV.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) * Note: The total message size arises from the internal buffer size and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * members of the iucv_tty_msg structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * The function returns NULL if memory allocation has failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) struct iucv_tty_buffer *bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) bufp = mempool_alloc(hvc_iucv_mempool, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (!bufp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) memset(bufp, 0, sizeof(*bufp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) bufp->msg.length = MSG_SIZE(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (!bufp->mbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) mempool_free(bufp, hvc_iucv_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) bufp->mbuf->version = MSG_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) bufp->mbuf->type = MSG_TYPE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) bufp->mbuf->datalen = (u16) size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) return bufp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 *
 * Frees the optional data buffer (kfree(NULL) is a no-op) and returns the
 * element itself to the mempool. The caller must have removed @bufp from
 * any queue beforehand.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) * @list: List containing struct iucv_tty_buffer elements.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static void destroy_tty_buffer_list(struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) struct iucv_tty_buffer *ent, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) list_for_each_entry_safe(ent, next, list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) list_del(&ent->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) destroy_tty_buffer(ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv: Pointer to struct hvc_iucv_private
 * @buf: HVC buffer for writing received terminal data.
 * @count: HVC buffer size.
 * @has_more_data: Pointer to an int variable; set to 1 if more queued data
 *		   remains after this call.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 * -ENOMEM is returned if the message data buffer cannot be allocated; the
 * message stays queued for a later retry.
 *
 * Locking: the caller holds priv->lock (see hvc_iucv_get_chars()).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		/* rc values are IUCV return codes; 2 and 9 mean the message
		 * is gone on the IUCV side, so fall through and discard the
		 * (invalid) buffer via the validity check below */
		switch (rc) {
		case 0: /* Successful */
			break;
		case 2: /* No message found */
		case 9: /* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
		    (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	/* message data is fully received: dispatch on message type */
	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			/* partial copy: remember offset and keep the message
			 * on the input queue for the next call */
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR: /* ignored ... */
	case MSG_TYPE_TERMENV: /* ignored ... */
	case MSG_TYPE_TERMIOS: /* ignored ... */
		break;
	}

out_remove_buffer:
	/* message fully consumed (or invalid): drop it from the input queue */
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * hvc_iucv_get_chars() - HVC get_chars operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * @vtermno: HVC virtual terminal number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * @buf: Pointer to a buffer to store data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * @count: Size of buffer available for writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * The HVC thread calls this method to read characters from the back-end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * If an IUCV communication path has been established, pending IUCV messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * are received and data is copied into buffer @buf up to @count bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * Locking: The routine gets called under an irqsave() spinlock; and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * the routine locks the struct hvc_iucv_private->lock to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * helper functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) int written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) int has_more_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) has_more_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) written = hvc_iucv_write(priv, buf, count, &has_more_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) /* if there are still messages on the queue... schedule another run */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (has_more_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) hvc_kick();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) return written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * hvc_iucv_queue() - Buffer terminal data for sending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * @priv: Pointer to struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * @buf: Buffer containing data to send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * @count: Size of buffer and amount of data to send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) * The function queues data for sending. To actually send the buffered data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * The function returns the number of data bytes that has been buffered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * If the device is not connected, data is ignored and the function returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * @count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * If the buffer is full, the function returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * If an existing IUCV communicaton path has been severed, -EPIPE is returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * (that can be passed to HVC layer to cause a tty hangup).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) if (priv->iucv_state == IUCV_DISCONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) return count; /* ignore data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (priv->iucv_state == IUCV_SEVERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) return -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) priv->sndbuf_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (priv->iucv_state == IUCV_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv: Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes consumed from the send
 * buffer is returned (the data is dropped if the IUCV send itself fails).
 * Returns 0 if no data is buffered, -EIO if there is no established IUCV
 * communication path, -EPIPE if an existing IUCV communication path has been
 * severed, or -ENOMEM if no message buffer could be allocated.
 *
 * Locking: the caller holds priv->lock (see hvc_iucv_sndbuf_work()).
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	/* queue the buffer BEFORE sending: presumably the message_complete
	 * callback looks it up on tty_outqueue once the send finishes —
	 * NOTE(review): callback body not visible in this chunk, confirm */
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	/* the send buffer counts as flushed even when the send failed */
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * @work: Work structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * This work queue function sends buffered output data over IUCV and,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * if not all buffered data could be sent, reschedules itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) static void hvc_iucv_sndbuf_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) struct hvc_iucv_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) hvc_iucv_send(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * hvc_iucv_put_chars() - HVC put_chars operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * @vtermno: HVC virtual terminal number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * @buf: Pointer to an buffer to read data from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * @count: Size of buffer available for reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * The HVC thread calls this method to write characters to the back-end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * The function calls hvc_iucv_queue() to queue terminal data for sending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * Locking: The method gets called under an irqsave() spinlock; and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * locks struct hvc_iucv_private->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) int queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (count <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) queued = hvc_iucv_queue(priv, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) return queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * @hp: Pointer to the HVC device (struct hvc_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * @id: Additional data (originally passed to hvc_alloc): the index of an struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * instance that is derived from @id. Always returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * Locking: struct hvc_iucv_private->lock, spin_lock_bh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) struct hvc_iucv_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) priv = hvc_iucv_get_private(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) priv->tty_state = TTY_OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * @priv: Pointer to the struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) destroy_tty_buffer_list(&priv->tty_outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) destroy_tty_buffer_list(&priv->tty_inqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) priv->tty_state = TTY_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) priv->iucv_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) priv->sndbuf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * tty_outqueue_empty() - Test if the tty outq is empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * @priv: Pointer to struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) rc = list_empty(&priv->tty_outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * flush_sndbuf_sync() - Flush send buffer and wait for completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * @priv: Pointer to struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * to flush any buffered terminal output data and waits for completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) int sync_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) cancel_delayed_work_sync(&priv->sndbuf_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) hvc_iucv_send(priv); /* force sending buffered data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (sync_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) wait_event_timeout(priv->sndbuf_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) tty_outqueue_empty(priv), HZ/10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * @priv: Pointer to hvc_iucv_private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) * This routine severs an existing IUCV communication path and hangs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) * up the underlying HVC terminal device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * The hang-up occurs only if an IUCV communication path is established;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) * otherwise there is no need to hang up the terminal device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) * The IUCV HVC hang-up is separated into two steps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) * 1. After the IUCV path has been severed, the iucv_state is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) * IUCV_SEVERED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) * IUCV_SEVERED state causes the tty hang-up in the HVC layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * If the tty has not yet been opened, clean up the hvc_iucv_private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * structure to allow re-connects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * If the tty has been opened, let get_chars() return -EPIPE to signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) * the HVC layer to hang up the tty and, if so, wake up the HVC thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * to call get_chars()...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) * Special notes on hanging up a HVC terminal instantiated as console:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) * 2. do_tty_hangup() calls tty->ops->close() for console_filp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) * => no hangup notifier is called by HVC (default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) * 2. hvc_close() returns because of tty_hung_up_p(filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) * => no delete notifier is called!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * Finally, the back-end is not being notified, thus, the tty session is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * kept active (TTY_OPEN) to be ready for re-connects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) * Locking: spin_lock(&priv->lock) w/o disabling bh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) struct iucv_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (priv->iucv_state == IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) path = priv->path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) priv->path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) priv->iucv_state = IUCV_SEVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) if (priv->tty_state == TTY_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) hvc_iucv_cleanup(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) /* console is special (see above) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (priv->is_console) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) hvc_iucv_cleanup(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) priv->tty_state = TTY_OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) hvc_kick();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /* finally sever path (outside of priv->lock due to lock ordering) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) iucv_path_sever(path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) * @hp: Pointer to the HVC device (struct hvc_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) * @id: Additional data (originally passed to hvc_alloc):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) * the index of an struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) * This routine notifies the HVC back-end that a tty hangup (carrier loss,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) * virtual or otherwise) has occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * to keep an existing IUCV communication path established.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * (Background: vhangup() is called from user space (by getty or login) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * disable writing to the tty by other applications).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) * If the tty has been opened and an established IUCV path has been severed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * Locking: struct hvc_iucv_private->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct hvc_iucv_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) priv = hvc_iucv_get_private(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) flush_sndbuf_sync(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) /* NOTE: If the hangup was scheduled by ourself (from the iucv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * path_servered callback [IUCV_SEVERED]), we have to clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) * our structure and to set state to TTY_CLOSED.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) * If the tty was hung up otherwise (e.g. vhangup()), then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * ignore this hangup and keep an established IUCV path open...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * (...the reason is that we are not able to connect back to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * client if we disconnect on hang up) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) priv->tty_state = TTY_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (priv->iucv_state == IUCV_SEVERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) hvc_iucv_cleanup(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * @hp: Pointer the HVC device (struct hvc_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * @raise: Non-zero to raise or zero to lower DTR/RTS lines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * This routine notifies the HVC back-end to raise or lower DTR/RTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * drop the IUCV connection (similar to hang up the modem).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct hvc_iucv_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) struct iucv_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) /* Raising the DTR/RTS is ignored as IUCV connections can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * established at any times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (raise)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) priv = hvc_iucv_get_private(hp->vtermno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /* Lowering the DTR/RTS lines disconnects an established IUCV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) flush_sndbuf_sync(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) path = priv->path; /* save reference to IUCV path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) priv->path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) priv->iucv_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /* Sever IUCV path outside of priv->lock due to lock ordering of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * priv->lock <--> iucv_table_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) iucv_path_sever(path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * @hp: Pointer to the HVC device (struct hvc_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * @id: Additional data (originally passed to hvc_alloc):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) * the index of an struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * This routine notifies the HVC back-end that the last tty device fd has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * closed. The function cleans up tty resources. The clean-up of the IUCV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * control setting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * Locking: struct hvc_iucv_private->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct hvc_iucv_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) priv = hvc_iucv_get_private(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) flush_sndbuf_sync(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) destroy_tty_buffer_list(&priv->tty_outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) destroy_tty_buffer_list(&priv->tty_inqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) priv->tty_state = TTY_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) priv->sndbuf_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * @ipvmid: Originating z/VM user ID (right padded with blanks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * connect, otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) static int hvc_iucv_filter_connreq(u8 ipvmid[8])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) const char *wildcard, *filter_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) size_t i, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) /* Note: default policy is ACCEPT if no filter is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (!hvc_iucv_filter_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) for (i = 0; i < hvc_iucv_filter_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) filter_entry = hvc_iucv_filter + (8 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /* If a filter entry contains the filter wildcard character,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * reduce the length to match the leading portion of the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * ID only (wildcard match). Characters following the wildcard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * are ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) len = (wildcard) ? wildcard - filter_entry : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (0 == memcmp(ipvmid, filter_entry, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * hvc_iucv_path_pending() - IUCV handler to process a connection request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * @path: Pending path (struct iucv_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * @ipvmid: z/VM system identifier of originator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * @ipuser: User specified data for this path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * (AF_IUCV: port/service name and originator port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * The function uses the @ipuser data to determine if the pending path belongs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * to a terminal managed by this device driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * If the path belongs to this driver, ensure that the terminal is not accessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * multiple times (only one connection to a terminal is allowed).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * If the terminal is not yet connected, the pending path is accepted and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * associated to the appropriate struct hvc_iucv_private instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * Returns 0 if @path belongs to a terminal managed by the this device driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * otherwise returns -ENODEV in order to dispatch this path to other handlers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * Locking: struct hvc_iucv_private->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) u8 *ipuser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct hvc_iucv_private *priv, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) u8 wildcard[9] = "lnxhvc ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int i, rc, find_unused;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) u8 nuser_data[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) u8 vm_user_id[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ASCEBC(wildcard, sizeof(wildcard));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) find_unused = !memcmp(wildcard, ipuser, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* First, check if the pending path request is managed by this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * IUCV handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * - find a disconnected device if ipuser contains the wildcard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * - find the device that matches the terminal ID in ipuser
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) for (i = 0; i < hvc_iucv_devices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) tmp = hvc_iucv_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (find_unused) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) spin_lock(&tmp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (tmp->iucv_state == IUCV_DISCONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) priv = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) spin_unlock(&tmp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) } else if (!memcmp(tmp->srv_name, ipuser, 8))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) priv = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Enforce that ipvmid is allowed to connect to us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) read_lock(&hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) rc = hvc_iucv_filter_connreq(ipvmid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) read_unlock(&hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) iucv_path_sever(path, ipuser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) memcpy(vm_user_id, ipvmid, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) vm_user_id[8] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) pr_info("A connection request from z/VM user ID %s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) "was refused\n", vm_user_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* If the terminal is already connected or being severed, then sever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * this path to enforce that there is only ONE established communication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * path per terminal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (priv->iucv_state != IUCV_DISCONN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) iucv_path_sever(path, ipuser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) goto out_path_handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* accept path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) path->msglim = 0xffff; /* IUCV MSGLIMIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) iucv_path_sever(path, ipuser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) goto out_path_handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) priv->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) priv->iucv_state = IUCV_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* store path information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) memcpy(priv->info_path, ipvmid, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) memcpy(priv->info_path + 8, ipuser + 8, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* flush buffered output data... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) schedule_delayed_work(&priv->sndbuf_work, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) out_path_handled:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * hvc_iucv_path_severed() - IUCV handler to process a path sever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * @path: Pending path (struct iucv_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * @ipuser: User specified data for this path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * (AF_IUCV: port/service name and originator port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * This function calls the hvc_iucv_hangup() function for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * respective IUCV HVC terminal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * Locking: struct hvc_iucv_private->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) struct hvc_iucv_private *priv = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) hvc_iucv_hangup(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * @path: Pending path (struct iucv_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * @msg: Pointer to the IUCV message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * The function puts an incoming message on the input queue for later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * If the tty has not yet been opened, the message is rejected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * Locking: struct hvc_iucv_private->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static void hvc_iucv_msg_pending(struct iucv_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct iucv_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct hvc_iucv_private *priv = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) struct iucv_tty_buffer *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* reject messages that exceed max size of iucv_tty_msg->datalen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) iucv_message_reject(path, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* reject messages if tty has not yet been opened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (priv->tty_state == TTY_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) iucv_message_reject(path, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) goto unlock_return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* allocate tty buffer to save iucv msg only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) rb = alloc_tty_buffer(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (!rb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) iucv_message_reject(path, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) goto unlock_return; /* -ENOMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) rb->msg = *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) list_add_tail(&rb->list, &priv->tty_inqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) hvc_kick(); /* wake up hvc thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) unlock_return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * hvc_iucv_msg_complete() - IUCV handler to process message completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * @path: Pending path (struct iucv_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * @msg: Pointer to the IUCV message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * The function is called upon completion of message delivery to remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * message from the outqueue. Additional delivery information can be found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * purged messages (0x010000 (IPADPGNR)).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * Locking: struct hvc_iucv_private->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) static void hvc_iucv_msg_complete(struct iucv_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct iucv_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct hvc_iucv_private *priv = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct iucv_tty_buffer *ent, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) LIST_HEAD(list_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (ent->msg.id == msg->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) list_move(&ent->list, &list_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) wake_up(&priv->sndbuf_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) destroy_tty_buffer_list(&list_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * hvc_iucv_pm_freeze() - Freeze PM callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * @dev: IUVC HVC terminal device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * Sever an established IUCV communication path and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * trigger a hang-up of the underlying HVC terminal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static int hvc_iucv_pm_freeze(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct hvc_iucv_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) hvc_iucv_hangup(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * @dev: IUVC HVC terminal device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * Wake up the HVC thread to trigger hang-up and respective
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * HVC back-end notifier invocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static int hvc_iucv_pm_restore_thaw(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) hvc_kick();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct hvc_iucv_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) len = sizeof(priv->srv_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) memcpy(buf, priv->srv_name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) EBCASC(buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) buf[len++] = '\n';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static ssize_t hvc_iucv_dev_state_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct hvc_iucv_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct hvc_iucv_private *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) char vmid[9], ipuser[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) memset(vmid, 0, sizeof(vmid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) memset(ipuser, 0, sizeof(ipuser));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (priv->iucv_state == IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) memcpy(vmid, priv->info_path, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) memcpy(ipuser, priv->info_path + 8, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) EBCASC(ipuser, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return sprintf(buf, "%s:%s\n", vmid, ipuser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* HVC operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static const struct hv_ops hvc_iucv_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) .get_chars = hvc_iucv_get_chars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) .put_chars = hvc_iucv_put_chars,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) .notifier_add = hvc_iucv_notifier_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) .notifier_del = hvc_iucv_notifier_del,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) .notifier_hangup = hvc_iucv_notifier_hangup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) .dtr_rts = hvc_iucv_dtr_rts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /* Suspend / resume device operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static const struct dev_pm_ops hvc_iucv_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) .freeze = hvc_iucv_pm_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) .thaw = hvc_iucv_pm_restore_thaw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) .restore = hvc_iucv_pm_restore_thaw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* IUCV HVC device driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static struct device_driver hvc_iucv_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) .name = KMSG_COMPONENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) .bus = &iucv_bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) .pm = &hvc_iucv_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* IUCV HVC device attributes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static struct attribute *hvc_iucv_dev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) &dev_attr_termid.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) &dev_attr_state.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) &dev_attr_peer.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static struct attribute_group hvc_iucv_dev_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) .attrs = hvc_iucv_dev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) &hvc_iucv_dev_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * @id: hvc_iucv_table index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * @is_console: Flag if the instance is used as Linux console
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * This function allocates a new hvc_iucv_private structure and stores
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * the instance in hvc_iucv_table at index @id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * Returns 0 on success; otherwise non-zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static int __init hvc_iucv_alloc(int id, unsigned int is_console)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct hvc_iucv_private *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) char name[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (!priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) spin_lock_init(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) INIT_LIST_HEAD(&priv->tty_outqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) INIT_LIST_HEAD(&priv->tty_inqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) init_waitqueue_head(&priv->sndbuf_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (!priv->sndbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /* set console flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) priv->is_console = is_console;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* allocate hvc device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (IS_ERR(priv->hvc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) rc = PTR_ERR(priv->hvc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) goto out_error_hvc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* notify HVC thread instead of using polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) priv->hvc->irq_requested = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /* setup iucv related information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) snprintf(name, 9, "lnxhvc%-2d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) memcpy(priv->srv_name, name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) ASCEBC(priv->srv_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* create and setup device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (!priv->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) goto out_error_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) dev_set_name(priv->dev, "hvc_iucv%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) dev_set_drvdata(priv->dev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) priv->dev->bus = &iucv_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) priv->dev->parent = iucv_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) priv->dev->driver = &hvc_iucv_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) priv->dev->groups = hvc_iucv_dev_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) priv->dev->release = (void (*)(struct device *)) kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) rc = device_register(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) put_device(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) goto out_error_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) hvc_iucv_table[id] = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) out_error_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) hvc_remove(priv->hvc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) out_error_hvc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) free_page((unsigned long) priv->sndbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) hvc_remove(priv->hvc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) device_unregister(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) free_page((unsigned long) priv->sndbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * @filter: String containing a comma-separated list of z/VM user IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * @dest: Location where to store the parsed z/VM user ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) const char *nextdelim, *residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) nextdelim = strchr(filter, ',');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (nextdelim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) len = nextdelim - filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) residual = nextdelim + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) len = strlen(filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) residual = filter + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* check for '\n' (if called from sysfs) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (filter[len - 1] == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) /* prohibit filter entries containing the wildcard character only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (len > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /* pad with blanks and save upper case version of user ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) memset(dest, ' ', 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) while (len--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) dest[len] = toupper(filter[len]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * hvc_iucv_setup_filter() - Set up z/VM user ID filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * @filter: String consisting of a comma-separated list of z/VM user IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * The function parses the @filter string and creates an array containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * the list of z/VM user ID filter entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Return code 0 means success, -EINVAL if the filter is syntactically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * incorrect, -ENOMEM if there was not enough memory to allocate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static int hvc_iucv_setup_filter(const char *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) const char *residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) size_t size, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) void *array, *old_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) count = strlen(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (count == 0 || (count == 1 && val[0] == '\n')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) array = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) goto out_replace_filter; /* clear filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) /* count user IDs in order to allocate sufficient memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) residual = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) while ((residual = strchr(residual, ',')) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) residual++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* check if the specified list exceeds the filter limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (size > MAX_VMID_FILTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) array = kcalloc(size, 8, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!array)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) count = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) residual = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) while (*residual && count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) residual = hvc_iucv_parse_filter(residual,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) array + ((size - count) * 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (IS_ERR(residual)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) err = PTR_ERR(residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) kfree(array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) out_replace_filter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) write_lock_bh(&hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) old_filter = hvc_iucv_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) hvc_iucv_filter_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) hvc_iucv_filter = array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) write_unlock_bh(&hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) kfree(old_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * param_set_vmidfilter() - Set z/VM user ID filter parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * @val: String consisting of a comma-separated list of z/VM user IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * @kp: Kernel parameter pointing to hvc_iucv_filter array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * The function sets up the z/VM user ID filter specified as comma-separated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * list of user IDs in @val.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * Note: If it is called early in the boot process, @val is stored and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * parsed later in hvc_iucv_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!MACHINE_IS_VM || !hvc_iucv_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (slab_is_available())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) rc = hvc_iucv_setup_filter(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) hvc_iucv_filter_string = val; /* defer... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * param_get_vmidfilter() - Get z/VM user ID filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * @buffer: Buffer to store z/VM user ID filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * (buffer size assumption PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * @kp: Kernel parameter pointing to the hvc_iucv_filter array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * The function stores the filter as a comma-separated list of z/VM user IDs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) * in @buffer. Typically, sysfs routines call this function for attr show.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) size_t index, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) void *start, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (!MACHINE_IS_VM || !hvc_iucv_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) read_lock_bh(&hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) for (index = 0; index < hvc_iucv_filter_size; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) start = hvc_iucv_filter + (8 * index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) end = memchr(start, ' ', 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) len = (end) ? end - start : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) memcpy(buffer + rc, start, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) rc += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) buffer[rc++] = ',';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) read_unlock_bh(&hvc_iucv_filter_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) buffer[--rc] = '\0'; /* replace last comma and update rc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) #define param_check_vmidfilter(name, p) __param_check(name, p, void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static const struct kernel_param_ops param_ops_vmidfilter = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) .set = param_set_vmidfilter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .get = param_get_vmidfilter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static int __init hvc_iucv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (!hvc_iucv_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (!MACHINE_IS_VM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) pr_notice("The z/VM IUCV HVC device driver cannot "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) "be used without z/VM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) pr_err("%lu is not a valid value for the hvc_iucv= "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) "kernel parameter\n", hvc_iucv_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* register IUCV HVC device driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) rc = driver_register(&hvc_iucv_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /* parse hvc_iucv_allow string and create z/VM user ID filter list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (hvc_iucv_filter_string) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) pr_err("Allocating memory failed with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) "reason code=%d\n", 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) pr_err("hvc_iucv_allow= does not specify a valid "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) "z/VM user ID list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) case -ENOSPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) pr_err("hvc_iucv_allow= specifies too many "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) "z/VM user IDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) sizeof(struct iucv_tty_buffer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (!hvc_iucv_buffer_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) pr_err("Allocating memory failed with reason code=%d\n", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) hvc_iucv_buffer_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (!hvc_iucv_mempool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) pr_err("Allocating memory failed with reason code=%d\n", 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) kmem_cache_destroy(hvc_iucv_buffer_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* register the first terminal device as console
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * (must be done before allocating hvc terminal devices) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) pr_err("Registering HVC terminal device as "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) "Linux console failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) goto out_error_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* allocate hvc_iucv_private structs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) for (i = 0; i < hvc_iucv_devices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) pr_err("Creating a new HVC terminal device "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) "failed with error code=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) goto out_error_hvc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* register IUCV callback handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) rc = iucv_register(&hvc_iucv_handler, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) pr_err("Registering IUCV handlers failed with error code=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) goto out_error_hvc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) out_error_hvc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) for (i = 0; i < hvc_iucv_devices; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (hvc_iucv_table[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) hvc_iucv_destroy(hvc_iucv_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) out_error_memory:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) mempool_destroy(hvc_iucv_mempool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) kmem_cache_destroy(hvc_iucv_buffer_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) kfree(hvc_iucv_filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) hvc_iucv_devices = 0; /* ensure that we do not provide any device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * @val: Parameter value (numeric)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static int __init hvc_iucv_config(char *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (kstrtoul(val, 10, &hvc_iucv_devices))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) pr_warn("hvc_iucv= invalid parameter value '%s'\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) device_initcall(hvc_iucv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) __setup("hvc_iucv=", hvc_iucv_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);