// SPDX-License-Identifier: GPL-2.0-only
/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
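/*
 * Userspace orientation, a minimal sketch only: AF_IUCV sockets are used
 * like any other address family. This assumes a userspace copy of
 * struct sockaddr_iucv with the same layout as include/net/iucv/af_iucv.h
 * (no uapi header exports it); the user id and name values are purely
 * illustrative:
 *
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	struct sockaddr_iucv sa = { .siucv_family = AF_IUCV };
 *	memcpy(sa.siucv_user_id, "VMGUEST1", 8);	// blank-padded, 8 bytes
 *	memcpy(sa.siucv_name,    "TESTSRV ", 8);	// blank-padded, 8 bytes
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * bind() takes the same sockaddr; on the HiperSockets transport a
 * siucv_name of eight blanks requests an auto-generated name (see
 * __iucv_auto_name() below).
 */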

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages: PRMDATA[7] = 0x01 encodes a socket data
 * length of 0xff - 0x01 = 0xfe, which is greater than 7 and thus marks a
 * shutdown notification rather than inline data (see iucv_msg_length())
 */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)

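/*
 * Sleep on @sk until @condition holds. The sock lock is released around
 * schedule_timeout() and re-acquired afterwards; the loop ends early with
 * -EAGAIN once the timeout is exhausted, with sock_intr_errno() when a
 * signal is pending, or with a pending socket error. iucv_sock_wait()
 * returns that result, or 0 if the condition was already true.
 */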
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

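/* IUCV user data is a 16-byte area holding two 8-byte names; these helpers
 * copy a name into its high (bytes 0..7) or low (bytes 8..15) half.
 */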
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg: Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
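 * For example, a 5-byte payload is stored with PRMDATA[7] = 0xff - 5 = 0xfa,
 * from which this function recovers 0xff - 0xfa = 5 bytes of socket data.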
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

/**
 * iucv_sock_in_state() - check for specific states
 * @sk: sock structure
 * @state: first iucv sk state
 * @state2: second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk: sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 * @sk: sock structure
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 * @imsg: iucv message to embed in the transport header, or NULL for a
 *	  pure control frame
 * @sock: sock structure
 * @skb: socket buffer to transmit; consumed (freed) on error
 * @flags: AF_IUCV flags (SYN, WIN, ...) for the transport header
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
	memset(phs_hdr, 0, sizeof(*phs_hdr));
	skb_reset_network_header(skb);

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}

	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

	__skb_header_release(skb);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}

	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}

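/* Find a bound socket by its 8-byte application name; the caller must hold
 * iucv_sk_list.lock.
 */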
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
		fallthrough;

	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}
		fallthrough;

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		fallthrough;

	default:
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

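/* Generate a unique name from the autobind counter, retrying on collision;
 * the caller must hold iucv_sk_list.lock to keep lookup and insert atomic.
 */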
static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	char uid[sizeof(sa->siucv_user_id)];
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		sk->sk_allocation |= GFP_DMA;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);
	iucv->transport = AF_IUCV_TRANS_IUCV;
	sk->sk_allocation |= GFP_DMA;

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

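/* Build the IUCV user data from both application names, allocate the path
 * and connect it; IUCV completion codes are translated to errno values.
 */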
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int peer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) addr->sa_family = AF_IUCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) memcpy(siucv->siucv_name, iucv->dst_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) memcpy(siucv->siucv_name, iucv->src_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return sizeof(struct sockaddr_iucv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * @path: IUCV path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * @msg: Pointer to a struct iucv_message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * @skb: The socket data to send, skb->len MUST BE <= 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) *
 * Send the socket data in the parameter list of the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the
 * parameter list; the last byte (index 7) holds the inverted data length
 * (0xff - skb->len).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * See also iucv_msg_length().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * Returns the error code from the iucv_message_send() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u8 prmdata[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) memcpy(prmdata, (void *) skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) prmdata[7] = 0xff - (u8) skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) (void *) prmdata, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
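
/* Worked example for the IPRM encoding above (illustrative): sending the
 * three bytes "abc" builds the parameter list
 *
 *	prmdata[] = { 0x61, 0x62, 0x63, xx, xx, xx, xx, 0xfc }
 *
 * where bytes 3..6 are unused padding and 0xfc == 0xff - 3 encodes the
 * payload length in the last byte; the receiver recovers it as
 * 0xff - prmdata[7].
 */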
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) size_t headroom = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) size_t linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct iucv_message txmsg = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct cmsghdr *cmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) int cmsg_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) long timeo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) char user_id[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) char appl_id[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int noblock = msg->msg_flags & MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) err = sock_error(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (msg->msg_flags & MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* SOCK_SEQPACKET: we do not support segmented records */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (sk->sk_shutdown & SEND_SHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* Return if the socket is not in connected state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (sk->sk_state != IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* initialize defaults */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) cmsg_done = 0; /* check for duplicate headers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* iterate over control messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) for_each_cmsghdr(cmsg, msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (!CMSG_OK(msg, cmsg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (cmsg->cmsg_level != SOL_IUCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (cmsg->cmsg_type & cmsg_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) cmsg_done |= cmsg->cmsg_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) switch (cmsg->cmsg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) case SCM_IUCV_TRGCLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* set iucv message target class */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) memcpy(&txmsg.class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
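
	/* Illustrative userspace sketch (not part of this module): setting
	 * the target class for a single sendmsg() call via SCM_IUCV_TRGCLS.
	 *
	 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	 *	struct msghdr mh = { .msg_control = cbuf,
	 *			     .msg_controllen = sizeof(cbuf) };
	 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
	 *	uint32_t trgcls = 0x42;		(arbitrary example value)
	 *
	 *	cm->cmsg_level = SOL_IUCV;
	 *	cm->cmsg_type  = SCM_IUCV_TRGCLS;
	 *	cm->cmsg_len   = CMSG_LEN(sizeof(trgcls));
	 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(trgcls));
	 *	(then set mh.msg_iov/msg_iovlen and call sendmsg(fd, &mh, 0))
	 */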
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* allocate one skb for each iucv message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * this is fine for SOCK_SEQPACKET (unless we want to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * segmented records using the MSG_EOR flag), but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * for SOCK_STREAM we might want to improve it in future */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (iucv->transport == AF_IUCV_TRANS_HIPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) headroom = sizeof(struct af_iucv_trans_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) LL_RESERVED_SPACE(iucv->hs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) linear = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (len < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) linear = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* In nonlinear "classic" iucv skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * reserve space for iucv_array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) headroom = sizeof(struct iucv_array) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) (MAX_SKB_FRAGS + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) linear = PAGE_SIZE - headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
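	/* layout sketch for the classic-transport case with len >= PAGE_SIZE,
	 * assuming PAGE_SIZE == 4096 and MAX_SKB_FRAGS == 17 (illustrative
	 * values): struct iucv_array holds two u32s, i.e. 8 bytes, so
	 * headroom = 8 * 18 = 144 bytes for the buffer list and
	 * linear = 4096 - 144 = 3952 bytes of linear data; anything beyond
	 * that is stored in page fragments described by iba[1..nr_frags].
	 */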
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) noblock, &err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (headroom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) skb_reserve(skb, headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) skb_put(skb, linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) skb->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) skb->data_len = len - linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
	/* wait while the number of outstanding messages on the iucv path
	 * has reached the message limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) timeo = sock_sndtimeo(sk, noblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* return -ECONNRESET if the socket is no longer connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (sk->sk_state != IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) err = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
	/* increment and save iucv message tag for the message_complete callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) txmsg.tag = iucv->send_tag++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) IUCV_SKB_CB(skb)->tag = txmsg.tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (iucv->transport == AF_IUCV_TRANS_HIPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) atomic_inc(&iucv->msg_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) err = afiucv_hs_send(&txmsg, sk, skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) atomic_dec(&iucv->msg_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) } else { /* Classic VM IUCV transport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) skb_queue_tail(&iucv->send_skb_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) skb->len <= 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) err = iucv_send_iprm(iucv->path, &txmsg, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
			/* on success there is no message_complete callback for an
			 * IPRMDATA msg; remove the skb from the send queue
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) skb_unlink(skb, &iucv->send_skb_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
			/* this error should never happen since the IUCV_IPRMDATA
			 * path flag is set... sever the path
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (err == 0x15) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) pr_iucv->path_sever(iucv->path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) skb_unlink(skb, &iucv->send_skb_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) } else if (skb_is_nonlinear(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct iucv_array *iba = (struct iucv_array *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /* skip iucv_array lying in the headroom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) iba[0].address = (u32)(addr_t)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) iba[0].length = (u32)skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) iba[i + 1].address =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) (u32)(addr_t)skb_frag_address(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) iba[i + 1].length = (u32)skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) err = pr_iucv->message_send(iucv->path, &txmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) IUCV_IPBUFLST, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) (void *)iba, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) } else { /* non-IPRM Linear skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) err = pr_iucv->message_send(iucv->path, &txmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 0, 0, (void *)skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (err) {
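			/* IUCV rc 3 means the peer's message limit is
			 * exceeded (matching the message printed below);
			 * report -EAGAIN so the caller can retry
			 */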
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (err == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) user_id[8] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) memcpy(user_id, iucv->dst_user_id, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) appl_id[8] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) memcpy(appl_id, iucv->dst_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) pr_err(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) "Application %s on z/VM guest %s exceeds message limit\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) appl_id, user_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) err = -EPIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) skb_unlink(skb, &iucv->send_skb_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
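/* Allocate a receive skb that mirrors the send-side layout: small messages
 * stay linear, larger ones reserve headroom for an iucv_array buffer list
 * and spill into page fragments.  GFP_DMA is used because classic IUCV
 * stores buffer addresses in u32 fields, so the memory must live below
 * 2 GB.
 */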
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) size_t headroom, linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (len < PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) headroom = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) linear = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) linear = PAGE_SIZE - headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) skb = alloc_skb_with_frags(headroom + linear, len - linear,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 0, &err, GFP_ATOMIC | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) WARN_ONCE(!skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) len, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (headroom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) skb_reserve(skb, headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) skb_put(skb, linear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) skb->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) skb->data_len = len - linear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* iucv_process_message() - Receive a single outstanding IUCV message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * Locking: must be called with message_q.lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct iucv_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct iucv_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) len = iucv_msg_length(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* store msg target class in the second 4 bytes of skb ctrl buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* Note: the first 4 bytes are reserved for msg tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) IUCV_SKB_CB(skb)->class = msg->class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) skb->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) skb->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (skb_is_nonlinear(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) struct iucv_array *iba = (struct iucv_array *)skb->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) iba[0].address = (u32)(addr_t)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) iba[0].length = (u32)skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) iba[i + 1].address =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) (u32)(addr_t)skb_frag_address(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) iba[i + 1].length = (u32)skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) rc = pr_iucv->message_receive(path, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) IUCV_IPBUFLST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) (void *)iba, len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) rc = pr_iucv->message_receive(path, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) msg->flags & IUCV_IPRMDATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) skb->data, len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) WARN_ON_ONCE(skb->len != len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) IUCV_SKB_CB(skb)->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (sk_filter(sk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) atomic_inc(&sk->sk_drops); /* skb rejected by filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /* iucv_process_message_q() - Process outstanding IUCV messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * Locking: must be called with message_q.lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static void iucv_process_message_q(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct sock_msg_q *p, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) iucv_process_message(sk, skb, p->path, &p->msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) list_del(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (!skb_queue_empty(&iucv->backlog_skb_q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) size_t len, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int noblock = flags & MSG_DONTWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) unsigned int copied, rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct sk_buff *skb, *rskb, *cskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
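	/* a disconnected socket with nothing left to deliver reports EOF */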
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if ((sk->sk_state == IUCV_DISCONN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) skb_queue_empty(&iucv->backlog_skb_q) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) skb_queue_empty(&sk->sk_receive_queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) list_empty(&iucv->message_q.list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
	if (flags & MSG_OOB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* receive/dequeue next skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * the function understands MSG_PEEK and, thus, does not dequeue skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) skb = skb_recv_datagram(sk, flags, noblock, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (sk->sk_shutdown & RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) offset = IUCV_SKB_CB(skb)->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) rlen = skb->len - offset; /* real length of skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) copied = min_t(unsigned int, rlen, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!rlen)
		sk->sk_shutdown |= RCV_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) cskb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (!(flags & MSG_PEEK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) skb_queue_head(&sk->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (sk->sk_type == SOCK_SEQPACKET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (copied < rlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) msg->msg_flags |= MSG_TRUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* each iucv message contains a complete record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) msg->msg_flags |= MSG_EOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
	/* create a control message to store the iucv msg target class:
	 * take the trgcls from the control buffer of the skb, because the
	 * original iucv message may have been fragmented. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) sizeof(IUCV_SKB_CB(skb)->class),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) (void *)&IUCV_SKB_CB(skb)->class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (!(flags & MSG_PEEK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) skb_queue_head(&sk->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* Mark read part of skb as used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (!(flags & MSG_PEEK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* SOCK_STREAM: re-queue skb if it contains unreceived data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (sk->sk_type == SOCK_STREAM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (copied < rlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) IUCV_SKB_CB(skb)->offset = offset + copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) skb_queue_head(&sk->sk_receive_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (iucv->transport == AF_IUCV_TRANS_HIPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) atomic_inc(&iucv->msg_recv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) iucv_sock_close(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* Queue backlog skbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) spin_lock_bh(&iucv->message_q.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) rskb = skb_dequeue(&iucv->backlog_skb_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) while (rskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) IUCV_SKB_CB(rskb)->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (__sock_queue_rcv_skb(sk, rskb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q, rskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) rskb = skb_dequeue(&iucv->backlog_skb_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (skb_queue_empty(&iucv->backlog_skb_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (!list_empty(&iucv->message_q.list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) iucv_process_message_q(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (atomic_read(&iucv->msg_recv) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) iucv->msglimit / 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) sk->sk_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) spin_unlock_bh(&iucv->message_q.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) copied = rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
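
/* Illustrative userspace sketch (not part of this module): receiving data
 * and the per-message target class delivered via SCM_IUCV_TRGCLS.
 *
 *	char buf[256], cbuf[CMSG_SPACE(sizeof(uint32_t))];
 *	uint32_t trgcls = 0;
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1,
 *			     .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	ssize_t n = recvmsg(fd, &mh, 0);
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	if (n >= 0 && cm && cm->cmsg_level == SOL_IUCV &&
 *	    cm->cmsg_type == SCM_IUCV_TRGCLS)
 *		memcpy(&trgcls, CMSG_DATA(cm), sizeof(trgcls));
 */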
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static inline __poll_t iucv_accept_poll(struct sock *parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct iucv_sock *isk, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) sk = (struct sock *) isk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (sk->sk_state == IUCV_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) __poll_t mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) sock_poll_wait(file, sock, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (sk->sk_state == IUCV_LISTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return iucv_accept_poll(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) mask |= EPOLLERR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (sk->sk_shutdown & RCV_SHUTDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) mask |= EPOLLRDHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (sk->sk_shutdown == SHUTDOWN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) mask |= EPOLLHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (!skb_queue_empty(&sk->sk_receive_queue) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) (sk->sk_shutdown & RCV_SHUTDOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) mask |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (sk->sk_state == IUCV_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) mask |= EPOLLHUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (sk->sk_state == IUCV_DISCONN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) mask |= EPOLLIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (sock_writeable(sk) && iucv_below_msglim(sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int iucv_sock_shutdown(struct socket *sock, int how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct iucv_message txmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
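	/* map SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto the mask values
	 * RCV_SHUTDOWN/SEND_SHUTDOWN/SHUTDOWN_MASK (1/2/3)
	 */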
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) how++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if ((how & ~SHUTDOWN_MASK) || !how)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case IUCV_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) case IUCV_DISCONN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) case IUCV_CLOSING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) case IUCV_CLOSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) sk->sk_state == IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (iucv->transport == AF_IUCV_TRANS_IUCV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) txmsg.class = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) txmsg.tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) err = pr_iucv->message_send(iucv->path, &txmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) err = -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
		} else {
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) sk->sk_shutdown |= how;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) iucv->path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) err = pr_iucv->path_quiesce(iucv->path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) err = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) skb_queue_purge(&sk->sk_receive_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /* Wake up anyone sleeping in poll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int iucv_sock_release(struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) iucv_sock_close(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) sock_orphan(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) iucv_sock_kill(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /* getsockopt and setsockopt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) sockptr_t optval, unsigned int optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (level != SOL_IUCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (optlen < sizeof(int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (copy_from_sockptr(&val, optval, sizeof(int)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) case SO_IPRMDATA_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) iucv->flags |= IUCV_IPRMDATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) iucv->flags &= ~IUCV_IPRMDATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) case SO_MSGLIMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) switch (sk->sk_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) case IUCV_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) case IUCV_BOUND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (val < 1 || val > U16_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) iucv->msglimit = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) rc = -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
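
/* Illustrative userspace sketch (not part of this module): enable
 * parameter-list (IPRM) data transfer and raise the message limit while
 * the socket is still unconnected; the limit 512 is an arbitrary example
 * within the 1..U16_MAX range checked above.
 *
 *	int one = 1, limit = 512;
 *
 *	setsockopt(fd, SOL_IUCV, SO_IPRMDATA_MSG, &one, sizeof(one));
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */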
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) char __user *optval, int __user *optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct sock *sk = sock->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (level != SOL_IUCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (get_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (len < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) len = min_t(unsigned int, len, sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) case SO_IPRMDATA_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) case SO_MSGLIMIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) : iucv->msglimit; /* default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) release_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) case SO_MSGSIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (sk->sk_state == IUCV_OPEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return -EBADFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 0x7fffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (put_user(len, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (copy_to_user(optval, &val, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /* Callback wrappers - called from iucv base support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) static int iucv_callback_connreq(struct iucv_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) u8 ipvmid[8], u8 ipuser[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) unsigned char user_data[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) unsigned char nuser_data[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) unsigned char src_name[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct sock *sk, *nsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct iucv_sock *iucv, *niucv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) memcpy(src_name, ipuser, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) EBCASC(src_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* Find out if this path belongs to af_iucv. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) read_lock(&iucv_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) iucv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) sk_for_each(sk, &iucv_sk_list.head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (sk->sk_state == IUCV_LISTEN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * Found a listening socket with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * src_name == ipuser[0-7].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) read_unlock(&iucv_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (!iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* No socket found, not one of our paths. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /* Check if parent socket is listening */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) low_nmcpy(user_data, iucv->src_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) high_nmcpy(user_data, iucv->dst_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ASCEBC(user_data, sizeof(user_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (sk->sk_state != IUCV_LISTEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) err = pr_iucv->path_sever(path, user_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /* Check for backlog size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (sk_acceptq_is_full(sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) err = pr_iucv->path_sever(path, user_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /* Create the new socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (!nsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) err = pr_iucv->path_sever(path, user_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) iucv_path_free(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) niucv = iucv_sk(nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) iucv_sock_init(nsk, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) niucv->transport = AF_IUCV_TRANS_IUCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) nsk->sk_allocation |= GFP_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* Set the new iucv_sock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) memcpy(niucv->dst_name, ipuser + 8, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) EBCASC(niucv->dst_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) memcpy(niucv->dst_user_id, ipvmid, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) memcpy(niucv->src_name, iucv->src_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) memcpy(niucv->src_user_id, iucv->src_user_id, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) niucv->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
	/* Prepare the user data for path_accept */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) high_nmcpy(nuser_data, ipuser + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) memcpy(nuser_data + 8, niucv->src_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ASCEBC(nuser_data + 8, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) /* set message limit for path based on msglimit of accepting socket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) niucv->msglimit = iucv->msglimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) path->msglim = iucv->msglimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) iucv_sever_path(nsk, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) iucv_sock_kill(nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) iucv_accept_enqueue(sk, nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /* Wake up accept */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) nsk->sk_state = IUCV_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
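
/* For illustration, a minimal user-space server that the listening-socket
 * lookup in iucv_callback_connreq() above would find; a sketch only, with
 * "APPSRV  " as a hypothetical application name, blank-padded to the
 * eight bytes compared against src_name:
 *
 *	int lsk = socket(AF_IUCV, SOCK_STREAM, 0);
 *	struct sockaddr_iucv addr = { .siucv_family = AF_IUCV };
 *
 *	memcpy(addr.siucv_name, "APPSRV  ", 8);
 *	bind(lsk, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lsk, 5);
 *	accept(lsk, NULL, NULL);
 *
 * The accept() call is what sk_data_ready wakes up once the new socket
 * has been enqueued by iucv_accept_enqueue().
 */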
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct sock *sk = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) sk->sk_state = IUCV_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) struct sock *sk = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct sock_msg_q *save_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (sk->sk_shutdown & RCV_SHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pr_iucv->message_reject(path, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) spin_lock(&iucv->message_q.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (!list_empty(&iucv->message_q.list) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) !skb_queue_empty(&iucv->backlog_skb_q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) goto save_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) len = atomic_read(&sk->sk_rmem_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) len += SKB_TRUESIZE(iucv_msg_length(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (len > sk->sk_rcvbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) goto save_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) goto save_message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) iucv_process_message(sk, skb, path, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) save_message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (!save_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) save_msg->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) save_msg->msg = *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) list_add_tail(&save_msg->list, &iucv->message_q.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) spin_unlock(&iucv->message_q.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
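
/* A sketch of the receive-side flow control above, with illustrative
 * numbers only: for a 4 KB message, SKB_TRUESIZE() adds the skb and
 * shared-info overhead to the 4096 data bytes, and that sum plus the
 * memory already charged to sk_rmem_alloc is checked against sk_rcvbuf.
 * Only if the message still fits is an skb allocated and the data
 * received immediately; otherwise just the small message descriptor is
 * parked on message_q, and the data is fetched once a reader has drained
 * the queues, so arrival order is preserved.
 */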
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static void iucv_callback_txdone(struct iucv_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct iucv_message *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct sock *sk = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct sk_buff *this = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct sk_buff *list_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) spin_lock_irqsave(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) skb_queue_walk(list, list_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) this = list_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (this)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) __skb_unlink(this, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) spin_unlock_irqrestore(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (this) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) kfree_skb(this);
		/* wake up any process waiting to send */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) iucv_sock_wake_msglim(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (sk->sk_state == IUCV_CLOSING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) sk->sk_state = IUCV_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) bh_unlock_sock(sk);
}
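
/* Completion matching, sketched: iucv_sock_sendmsg() stores the IUCV
 * message tag in IUCV_SKB_CB(skb)->tag before queueing the skb on
 * send_skb_q, so the tag delivered with this transmit-completion
 * interrupt selects exactly the skb to release above.
 */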
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct sock *sk = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (sk->sk_state == IUCV_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) iucv_sever_path(sk, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) sk->sk_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /* called if the other communication side shuts down its RECV direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) struct sock *sk = path->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (sk->sk_state != IUCV_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) sk->sk_shutdown |= SEND_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /***************** HiperSockets transport callbacks ********************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) static void afiucv_swap_src_dest(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) char tmpID[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) char tmpName[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) memcpy(tmpID, trans_hdr->srcUserID, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) memcpy(tmpName, trans_hdr->srcAppName, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) memcpy(trans_hdr->destUserID, tmpID, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) memcpy(trans_hdr->destAppName, tmpName, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) skb_push(skb, ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) memset(skb->data, 0, ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
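
/* For illustration (all identifiers hypothetical): a frame that arrived
 * with
 *
 *	srcUserID/srcAppName   = "LNXPEER " / "APPCLI  "
 *	destUserID/destAppName = "LNXHOST " / "APPSRV  "
 *
 * leaves afiucv_swap_src_dest() with the two pairs exchanged, converted
 * back to EBCDIC, and a zeroed ethernet header pushed in front, ready to
 * be handed back to the originator via dev_queue_xmit().
 */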
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /**
 * afiucv_hs_callback_syn() - react to a received SYN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct sock *nsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) struct iucv_sock *iucv, *niucv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!iucv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* no sock - connection refused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) afiucv_swap_src_dest(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) err = dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if ((sk->sk_state != IUCV_LISTEN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) sk_acceptq_is_full(sk) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) !nsk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /* error on server socket - connection refused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) afiucv_swap_src_dest(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) err = dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) iucv_sock_kill(nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) niucv = iucv_sk(nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) iucv_sock_init(nsk, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) niucv->transport = AF_IUCV_TRANS_HIPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) niucv->msglimit = iucv->msglimit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (!trans_hdr->window)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) niucv->msglimit_peer = trans_hdr->window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) memcpy(niucv->src_name, iucv->src_name, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) memcpy(niucv->src_user_id, iucv->src_user_id, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) niucv->hs_dev = iucv->hs_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) dev_hold(niucv->hs_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) afiucv_swap_src_dest(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) trans_hdr->window = niucv->msglimit;
	/* if the receiver acks the xmit, the connection is established */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) err = dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) iucv_accept_enqueue(sk, nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) nsk->sk_state = IUCV_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) sk->sk_data_ready(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) iucv_sock_kill(nsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) }
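
/* The resulting connection setup over the HiperSockets transport,
 * sketched (the window fields carry the peers' message limits):
 *
 *	client                             server (IUCV_LISTEN)
 *	  |---- SYN, window -------------->|
 *	  |<--- SYN|ACK, window -----------|  accepted, child sk enqueued
 *	  |<--- SYN|FIN -------------------|  refused or no listener
 */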
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /**
 * afiucv_hs_callback_synack() - react to a received SYN|ACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (!iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (sk->sk_state != IUCV_BOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) sk->sk_state = IUCV_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /**
 * afiucv_hs_callback_synfin() - react to a received SYN|FIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (!iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (sk->sk_state != IUCV_BOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) sk->sk_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) /**
 * afiucv_hs_callback_fin() - react to a received FIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* other end of connection closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (!iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) bh_lock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (sk->sk_state == IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) sk->sk_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) bh_unlock_sock(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /**
 * afiucv_hs_callback_win() - react to a received WIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (!iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (sk->sk_state != IUCV_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) iucv_sock_wake_msglim(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /**
 * afiucv_hs_callback_rx() - react to received data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct iucv_sock *iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (!iucv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (sk->sk_state != IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (sk->sk_shutdown & RCV_SHUTDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
	/* strip the transport header and reset the offset in the skb's cb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) IUCV_SKB_CB(skb)->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (sk_filter(sk, skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) atomic_inc(&sk->sk_drops); /* skb rejected by filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) spin_lock(&iucv->message_q.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (skb_queue_empty(&iucv->backlog_skb_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (__sock_queue_rcv_skb(sk, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /* handle rcv queue full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) skb_queue_tail(&iucv->backlog_skb_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) spin_unlock(&iucv->message_q.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /**
 * afiucv_hs_rcv() - entry point for data arriving through the HiperSockets
 * transport; called from the netif RX softirq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct packet_type *pt, struct net_device *orig_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct iucv_sock *iucv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct af_iucv_trans_hdr *trans_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) int err = NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) char nullstring[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return NET_RX_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) trans_hdr = iucv_trans_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) memset(nullstring, 0, sizeof(nullstring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) iucv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) read_lock(&iucv_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) sk_for_each(sk, &iucv_sk_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) if ((!memcmp(&iucv_sk(sk)->src_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) trans_hdr->destAppName, 8)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) (!memcmp(&iucv_sk(sk)->src_user_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) trans_hdr->destUserID, 8)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) (!memcmp(&iucv_sk(sk)->dst_user_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) nullstring, 8))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if ((!memcmp(&iucv_sk(sk)->src_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) trans_hdr->destAppName, 8)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) (!memcmp(&iucv_sk(sk)->src_user_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) trans_hdr->destUserID, 8)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) (!memcmp(&iucv_sk(sk)->dst_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) trans_hdr->srcAppName, 8)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) (!memcmp(&iucv_sk(sk)->dst_user_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) trans_hdr->srcUserID, 8))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) read_unlock(&iucv_sk_list.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
	/* No matching socket was found: sk is NULL here and the handlers
	 * below must cope with that on their own:
	 *   SYN  -> reply with SYN|FIN (connection refused)
	 *   data -> drop the frame
	 *   SYN|ACK, SYN|FIN, FIN -> no action, drop the frame
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) switch (trans_hdr->flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) case AF_IUCV_FLAG_SYN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) /* connect request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) err = afiucv_hs_callback_syn(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /* connect request confirmed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) err = afiucv_hs_callback_synack(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /* connect request refused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) err = afiucv_hs_callback_synfin(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) case (AF_IUCV_FLAG_FIN):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* close request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) err = afiucv_hs_callback_fin(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) case (AF_IUCV_FLAG_WIN):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) err = afiucv_hs_callback_win(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) fallthrough; /* and receive non-zero length data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) case (AF_IUCV_FLAG_SHT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* shutdown request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) fallthrough; /* and receive zero length data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /* plain data frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) err = afiucv_hs_callback_rx(sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) }
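
/* On-the-wire format dispatched above, sketched from the structure
 * definitions: an ETH_P_AF_IUCV frame is an ethernet header followed by
 * struct af_iucv_trans_hdr - flags, window and the EBCDIC src/dest
 * user-ID/application-name tuples that key the socket lookup - followed
 * by the payload for plain data frames.
 */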
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /**
 * afiucv_hs_callback_txnotify() - handle send notifications from the
 * HiperSockets transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) enum iucv_tx_notify n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) struct sock *isk = skb->sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct sock *sk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct iucv_sock *iucv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct sk_buff_head *list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) struct sk_buff *list_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct sk_buff *nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) read_lock_irqsave(&iucv_sk_list.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) sk_for_each(sk, &iucv_sk_list.head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (sk == isk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) read_unlock_irqrestore(&iucv_sk_list.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (!iucv || sock_flag(sk, SOCK_ZAPPED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) list = &iucv->send_skb_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) spin_lock_irqsave(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) skb_queue_walk_safe(list, list_skb, nskb) {
		/* the queue holds a clone of the transmitted skb; the
		 * shared info area they have in common identifies it
		 */
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) switch (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) case TX_NOTIFY_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) __skb_unlink(list_skb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) kfree_skb(list_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) iucv_sock_wake_msglim(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) case TX_NOTIFY_PENDING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) atomic_inc(&iucv->pendings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) case TX_NOTIFY_DELAYED_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) __skb_unlink(list_skb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) atomic_dec(&iucv->pendings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (atomic_read(&iucv->pendings) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) iucv_sock_wake_msglim(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) kfree_skb(list_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) case TX_NOTIFY_UNREACHABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) case TX_NOTIFY_DELAYED_UNREACHABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) case TX_NOTIFY_TPQFULL: /* not yet used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) case TX_NOTIFY_GENERALERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) case TX_NOTIFY_DELAYED_GENERALERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) __skb_unlink(list_skb, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) kfree_skb(list_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (sk->sk_state == IUCV_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) sk->sk_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) spin_unlock_irqrestore(&list->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (sk->sk_state == IUCV_CLOSING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) sk->sk_state = IUCV_CLOSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
}
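
/* Lifecycle of a sent frame under the notifications above, sketched:
 * TX_NOTIFY_OK releases the queued clone and wakes blocked senders at
 * once; TX_NOTIFY_PENDING and TX_NOTIFY_DELAYED_OK bracket an in-flight
 * frame via iucv->pendings so that the wakeup fires only when the last
 * delayed frame has completed; the unreachable/error notifications
 * additionally drop an established socket to IUCV_DISCONN.
 */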
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * afiucv_netdev_event: handle netdev notifier chain events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) static int afiucv_netdev_event(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) unsigned long event, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct iucv_sock *iucv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) case NETDEV_REBOOT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) case NETDEV_GOING_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) sk_for_each(sk, &iucv_sk_list.head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) iucv = iucv_sk(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if ((iucv->hs_dev == event_dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) (sk->sk_state == IUCV_CONNECTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (event == NETDEV_GOING_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) sk->sk_state = IUCV_DISCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) sk->sk_state_change(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) case NETDEV_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) case NETDEV_UNREGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static struct notifier_block afiucv_netdev_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) .notifier_call = afiucv_netdev_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) static const struct proto_ops iucv_sock_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) .family = PF_IUCV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) .release = iucv_sock_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) .bind = iucv_sock_bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) .connect = iucv_sock_connect,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) .listen = iucv_sock_listen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) .accept = iucv_sock_accept,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) .getname = iucv_sock_getname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) .sendmsg = iucv_sock_sendmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) .recvmsg = iucv_sock_recvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) .poll = iucv_sock_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) .ioctl = sock_no_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) .mmap = sock_no_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) .socketpair = sock_no_socketpair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) .shutdown = iucv_sock_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) .setsockopt = iucv_sock_setsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) .getsockopt = iucv_sock_getsockopt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) int kern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct sock *sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (protocol && protocol != PF_IUCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) sock->state = SS_UNCONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) switch (sock->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) case SOCK_STREAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) case SOCK_SEQPACKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) /* currently, proto ops can handle both sk types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) sock->ops = &iucv_sock_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return -ESOCKTNOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (!sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) iucv_sock_init(sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
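
/* A minimal user-space client for the socket family registered below; a
 * sketch only, with hypothetical peer identifiers ("LNXHOST " being the
 * z/VM guest user ID and "APPSRV  " the application name the peer bound,
 * both blank-padded to eight bytes):
 *
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	struct sockaddr_iucv peer = { .siucv_family = AF_IUCV };
 *
 *	memcpy(peer.siucv_user_id, "LNXHOST ", 8);
 *	memcpy(peer.siucv_name, "APPSRV  ", 8);
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 */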
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) static const struct net_proto_family iucv_sock_family_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) .family = AF_IUCV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) .create = iucv_sock_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) static struct packet_type iucv_packet_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) .type = cpu_to_be16(ETH_P_AF_IUCV),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) .func = afiucv_hs_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static int afiucv_iucv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return pr_iucv->iucv_register(&af_iucv_handler, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static void afiucv_iucv_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) pr_iucv->iucv_unregister(&af_iucv_handler, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) static int __init afiucv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (MACHINE_IS_VM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) WARN_ON(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) err = -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (!pr_iucv) {
			pr_warn("iucv_if lookup failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) memset(&iucv_userid, 0, sizeof(iucv_userid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) memset(&iucv_userid, 0, sizeof(iucv_userid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) pr_iucv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) err = proto_register(&iucv_proto, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) err = sock_register(&iucv_sock_family_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) goto out_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (pr_iucv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) err = afiucv_iucv_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) goto out_sock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) err = register_netdevice_notifier(&afiucv_netdev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) goto out_notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) dev_add_pack(&iucv_packet_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) out_notifier:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) if (pr_iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) afiucv_iucv_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) out_sock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) sock_unregister(PF_IUCV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) out_proto:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) proto_unregister(&iucv_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (pr_iucv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) symbol_put(iucv_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) static void __exit afiucv_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (pr_iucv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) afiucv_iucv_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) symbol_put(iucv_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) unregister_netdevice_notifier(&afiucv_netdev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) dev_remove_pack(&iucv_packet_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) sock_unregister(PF_IUCV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) proto_unregister(&iucv_proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) module_init(afiucv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) module_exit(afiucv_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) MODULE_VERSION(VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) MODULE_ALIAS_NETPROTO(PF_IUCV);