Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

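The listing below is netiucv.c, the IUCV network driver for Linux on s390 (drivers/s390/net/netiucv.c), as carried in this tree.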
// SPDX-License-Identifier: GPL-2.0+
/*
 * IUCV network driver
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *	Original netiucv driver:
 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *	Sysfs integration and all bugs therein:
 *		Cornelia Huck (cornelia.huck@de.ibm.com)
 *	PM functions:
 *		Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *  the source of the original IUCV driver by:
 *    Stefan Hegewald <hegewald@de.ibm.com>
 *    Hartmut Penner <hpenner@de.ibm.com>
 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 */

#define KMSG_COMPONENT "netiucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#undef DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>
#include <linux/device.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/ebcdic.h>

#include <net/iucv/iucv.h>
#include "fsm.h"

MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");

/**
 * Debug Facility stuff
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3

#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (debug_level_enabled(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)

#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
		debug_sprintf_event(iucv_dbf_trace, level, text ); \
	} while (0)
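
/*
 * Typical use of the debug facility macros, as seen later in this file:
 *
 *	IUCV_DBF_TEXT(trace, 3, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 *
 * IUCV_DBF_TEXT logs a fixed string; IUCV_DBF_TEXT_ formats its
 * sprintf-style arguments into a per-CPU buffer before logging.
 */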

/**
 * some more debug stuff
 */
#define PRINTK_HEADER " iucv: "       /* for debugging */

static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus  = &iucv_bus,
};

static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void netiucv_callback_connack(struct iucv_path *, u8 *);
static void netiucv_callback_connrej(struct iucv_path *, u8 *);
static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
static void netiucv_callback_connres(struct iucv_path *, u8 *);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};

/**
 * Per connection profiling data
 */
struct connection_profile {
	unsigned long maxmulti;
	unsigned long maxcqueue;
	unsigned long doios_single;
	unsigned long doios_multi;
	unsigned long txlen;
	unsigned long tx_time;
	unsigned long send_stamp;
	unsigned long tx_pending;
	unsigned long tx_max_pending;
};

/**
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;
	struct iucv_path	  *path;
	struct sk_buff            *rx_buff;
	struct sk_buff            *tx_buff;
	struct sk_buff_head       collect_queue;
	struct sk_buff_head	  commit_queue;
	spinlock_t                collect_lock;
	int                       collect_len;
	int                       max_buffsize;
	fsm_timer                 timer;
	fsm_instance              *fsm;
	struct net_device         *netdev;
	struct connection_profile prof;
	char                      userid[9];
	char			  userdata[17];
};

/**
 * Linked list of all connection structs.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/**
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;
	void                   *data;
};

/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;
	unsigned long           tbusy;
	fsm_instance            *fsm;
        struct iucv_connection  *conn;
	struct device           *dev;
};

/**
 * Link level header for a packet.
 */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 65537
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT      9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC     5000
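
/*
 * Framing used on the IUCV link (see netiucv_unpack_skb and
 * conn_action_txdone below): an IUCV message carries one or more
 * packets back to back, each preceded by an ll_header whose 'next'
 * field holds the offset of the following ll_header counted from the
 * start of the buffer; a header with next == 0 terminates the buffer.
 * A buffer holding two packets P0 (len0 bytes) and P1 (len1 bytes) is
 * therefore laid out as:
 *
 *	offset 0:			ll_header { next = 2 + len0 }
 *	offset 2:			P0 payload (len0 bytes)
 *	offset 2 + len0:		ll_header { next = 4 + len0 + len1 }
 *	offset 4 + len0:		P1 payload (len1 bytes)
 *	offset 4 + len0 + len1:		ll_header { next = 0 }
 */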

/**
 * Compatibility macros for busy handling
 * of network devices.
 */
static void netiucv_clear_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	clear_bit(0, &priv->tbusy);
	netif_wake_queue(dev);
}

static int netiucv_test_and_set_busy(struct net_device *dev)
{
	struct netiucv_priv *priv = netdev_priv(dev);
	netif_stop_queue(dev);
	return test_and_set_bit(0, &priv->tbusy);
}

static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};

/**
 * Convert an iucv userId to its printable
 * form (strip whitespace at end).
 *
 * @param An iucv userId
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	char *p = tmp;
	memcpy(tmp, name, len);
	tmp[len] = '\0';
	while (*p && ((p - tmp) < len) && (!isspace(*p)))
		p++;
	*p = '\0';
	return tmp;
}
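
/*
 * For example, the blank-padded 8-byte id "USER1   " prints as "USER1".
 */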

static char *netiucv_printuser(struct iucv_connection *conn)
{
	static char tmp_uid[9];
	static char tmp_udat[17];
	static char buf[100];

	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
		tmp_uid[8] = '\0';
		tmp_udat[16] = '\0';
		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
		memcpy(tmp_udat, conn->userdata, 16);
		EBCASC(tmp_udat, 16);
		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
		return buf;
	} else
		return netiucv_printname(conn->userid, 8);
}

/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};

/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};

/**
 * Events of the connection statemachine
 */
enum conn_events {
	/**
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/**
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */

	/**
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};

/**
 * States of the connection statemachine.
 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,

	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,

	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,

	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,

	/**
	 * Error during registration.
	 */
	CONN_STATE_CONNERR,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};

static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Terminating",
	"Registration error",
	"Connect error",
};


/**
 * Debug Facility Stuff
 */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);

static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}

/*
 * Callback-wrappers, called from lowlevel iucv layer.
 */

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}

static void netiucv_callback_txdone(struct iucv_path *path,
				    struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;
	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}

static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}

static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
				    u8 *ipuser)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];
	int rc;

	rc = -EINVAL;
	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}

static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}

/**
 * NOP action for statemachines
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions of the connection statemachine
 */

/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device     *dev = conn->netdev;
	struct netiucv_priv   *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = cpu_to_be16(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
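		/*
		 * header->next holds the offset of the following ll_header,
		 * measured from the start of the receive buffer.  Convert it
		 * to the length of this packet's payload: subtract the
		 * absolute position of the current header (tracked in
		 * 'offset'), advance 'offset' to the following header, then
		 * drop the header size itself.
		 */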
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}

static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}

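/*
 * conn_action_txdone - a send has been confirmed by the IUCV layer.
 *
 * Drops the confirmed skb from the commit queue (single-skb sends are
 * marked via msg->tag), then drains the collect queue, where packets
 * are parked while a send is still outstanding: the collected skbs are
 * packed into tx_buff using the ll_header framing and sent as one
 * multi-packet IUCV message.  If nothing was collected, the connection
 * returns to CONN_STATE_IDLE.
 */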
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	struct iucv_event *ev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	struct iucv_connection *conn = ev->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	struct iucv_message *msg = ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	struct iucv_message txmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	struct netiucv_priv *privptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	u32 single_flag = msg->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	u32 txbytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	u32 txpackets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	u32 stat_maxcq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	unsigned long saveflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	struct ll_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	if (!conn || !conn->netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		IUCV_DBF_TEXT(data, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 			      "Send confirmation for unlinked connection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	privptr = netdev_priv(conn->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	conn->prof.tx_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (single_flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		if ((skb = skb_dequeue(&conn->commit_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			refcount_dec(&skb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			if (privptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 				privptr->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 				privptr->stats.tx_bytes +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 					(skb->len - NETIUCV_HDRLEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 						  - NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	conn->tx_buff->data = conn->tx_buff->head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	skb_reset_tail_pointer(conn->tx_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	conn->tx_buff->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	spin_lock_irqsave(&conn->collect_lock, saveflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	while ((skb = skb_dequeue(&conn->collect_queue))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		skb_copy_from_linear_data(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 					  skb_put(conn->tx_buff, skb->len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 					  skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		txbytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		txpackets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		stat_maxcq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		refcount_dec(&skb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	if (conn->collect_len > conn->prof.maxmulti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		conn->prof.maxmulti = conn->collect_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	conn->collect_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	if (conn->tx_buff->len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		fsm_newstate(fi, CONN_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	header.next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	conn->prof.send_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	txmsg.class = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	txmsg.tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 			       conn->tx_buff->data, conn->tx_buff->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	conn->prof.doios_multi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	conn->prof.txlen += conn->tx_buff->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	conn->prof.tx_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		conn->prof.tx_max_pending = conn->prof.tx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		conn->prof.tx_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		fsm_newstate(fi, CONN_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (privptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			privptr->stats.tx_errors += txpackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		if (privptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			privptr->stats.tx_packets += txpackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			privptr->stats.tx_bytes += txbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		if (stat_maxcq > conn->prof.maxcqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			conn->prof.maxcqueue = stat_maxcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct iucv_event *ev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	struct iucv_connection *conn = ev->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	struct iucv_path *path = ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	struct net_device *netdev = conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	struct netiucv_priv *privptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	conn->path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	path->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	fsm_newstate(fi, CONN_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	netdev->tx_queue_len = conn->path->msglim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
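/*
 * Reject an incoming IUCV path request by severing the offered path.
 */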
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	struct iucv_event *ev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct iucv_path *path = ev->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	iucv_path_sever(path, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
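/*
 * The peer accepted our connection request (CONN_EVENT_CONN_ACK): stop
 * the setup timer, switch to CONN_STATE_IDLE and report DEV_EVENT_CONUP
 * to the interface statemachine.
 */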
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) static void conn_action_connack(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	struct iucv_connection *conn = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	struct net_device *netdev = conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	struct netiucv_priv *privptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	fsm_deltimer(&conn->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	fsm_newstate(fi, CONN_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	netdev->tx_queue_len = conn->path->msglim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
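/*
 * Connection setup timed out (CONN_EVENT_TIMER in SETUPWAIT): sever the
 * path and fall back to CONN_STATE_STARTWAIT.
 */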
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct iucv_connection *conn = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	fsm_deltimer(&conn->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	iucv_path_sever(conn->path, conn->userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
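/*
 * The peer severed or rejected the connection (CONN_EVENT_CONN_REJ):
 * sever our end of the path, fall back to CONN_STATE_STARTWAIT and
 * report DEV_EVENT_CONDOWN to the interface statemachine.
 */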
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct iucv_connection *conn = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct net_device *netdev = conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	struct netiucv_priv *privptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	fsm_deltimer(&conn->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	iucv_path_sever(conn->path, conn->userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			       "connection\n", netiucv_printuser(conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	IUCV_DBF_TEXT(data, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		      "conn_action_connsever: Remote dropped connection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
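/*
 * Start an outgoing connection (CONN_EVENT_START): allocate an IUCV path
 * and connect to the configured peer. On success a setup timer is armed;
 * on failure the IUCV return code is reported and the path is freed.
 */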
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) static void conn_action_start(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	struct iucv_connection *conn = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	struct net_device *netdev = conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct netiucv_priv *privptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 * We must set the state before calling iucv_connect because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * callback handler could be called at any point after the connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * request is sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	if (!conn->path) {
		/* allocation can fail; do not pass a NULL path to iucv_path_connect() */
		IUCV_DBF_TEXT(setup, 2, "conn_action_start: path alloc failed\n");
		fsm_newstate(fi, CONN_STATE_STARTWAIT);
		return;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		netdev->name, netiucv_printuser(conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			       NULL, conn->userdata, conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		netdev->tx_queue_len = conn->path->msglim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			     CONN_EVENT_TIMER, conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		dev_warn(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			"The IUCV device failed to connect to z/VM guest %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			netiucv_printname(conn->userid, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		dev_warn(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			"The IUCV device failed to connect to the peer on z/VM"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			" guest %s\n", netiucv_printname(conn->userid, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	case 13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		dev_err(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			"Connecting the IUCV device would exceed the maximum"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			" number of IUCV connections\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		fsm_newstate(fi, CONN_STATE_CONNERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	case 14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		dev_err(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			"z/VM guest %s has too many IUCV connections"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			" to connect with the IUCV device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 			netiucv_printname(conn->userid, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		fsm_newstate(fi, CONN_STATE_CONNERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		dev_err(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			"The IUCV device cannot connect to a z/VM guest with no"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			" IUCV authorization\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		fsm_newstate(fi, CONN_STATE_CONNERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		dev_err(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			"Connecting the IUCV device failed with error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		fsm_newstate(fi, CONN_STATE_CONNERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	kfree(conn->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	conn->path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
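/*
 * Drop all skbs still queued on the given queue, releasing the extra
 * reference taken when they were queued.
 */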
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static void netiucv_purge_skb_queue(struct sk_buff_head *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	while ((skb = skb_dequeue(q))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		refcount_dec(&skb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
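/*
 * Stop a connection (CONN_EVENT_STOP): purge the collect and commit
 * queues, sever and free the path if one exists and report
 * DEV_EVENT_CONDOWN to the interface statemachine.
 */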
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) static void conn_action_stop(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	struct iucv_event *ev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	struct iucv_connection *conn = ev->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	struct net_device *netdev = conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	struct netiucv_priv *privptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	fsm_deltimer(&conn->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	fsm_newstate(fi, CONN_STATE_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	netiucv_purge_skb_queue(&conn->collect_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	if (conn->path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		iucv_path_sever(conn->path, conn->userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		kfree(conn->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		conn->path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	netiucv_purge_skb_queue(&conn->commit_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
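/*
 * Catch-all action for events that are invalid in the current state;
 * it only logs the occurrence.
 */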
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static void conn_action_inval(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	struct iucv_connection *conn = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct net_device *netdev = conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		netdev->name, conn->userid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
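/*
 * The connection statemachine: all valid {state, event} -> action
 * transitions.
 */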
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) static const fsm_node conn_fsm[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static const int CONN_FSM_LEN = ARRAY_SIZE(conn_fsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  * Actions for interface - statemachine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * dev_action_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  * @fi: An instance of an interface statemachine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  * @event: The event that just happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * @arg: Generic pointer, cast from struct net_device * upon call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * Start up the connection by sending CONN_EVENT_START to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static void dev_action_start(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct net_device   *dev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	struct netiucv_priv *privptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * Shut down the connection by sending CONN_EVENT_STOP to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * @param fi    An instance of an interface statemachine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * @param event The event that just happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @param arg   Generic pointer, cast from struct net_device * upon call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dev_action_stop(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	struct net_device   *dev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	struct netiucv_priv *privptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	struct iucv_event   ev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	ev.conn = privptr->conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	fsm_newstate(fi, DEV_STATE_STOPWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * Called from connection statemachine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  * when a connection is up and running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  * @param fi    An instance of an interface statemachine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  * @param event The event that just happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  * @param arg   Generic pointer, cast from struct net_device * upon call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) dev_action_connup(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	struct net_device   *dev = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct netiucv_priv *privptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	switch (fsm_getstate(fi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		case DEV_STATE_STARTWAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			fsm_newstate(fi, DEV_STATE_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			dev_info(privptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				"The IUCV device has been connected"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				" successfully to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				netiucv_printuser(privptr->conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			IUCV_DBF_TEXT(setup, 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				"connection is up and running\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		case DEV_STATE_STOPWAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			IUCV_DBF_TEXT(data, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  * Called from connection statemachine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  * when a connection has been shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * @param fi    An instance of an interface statemachine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * @param event The event that just happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  * @param arg   Generic pointer, cast from struct net_device * upon call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) dev_action_conndown(fsm_instance *fi, int event, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	switch (fsm_getstate(fi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		case DEV_STATE_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			fsm_newstate(fi, DEV_STATE_STARTWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		case DEV_STATE_STOPWAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			fsm_newstate(fi, DEV_STATE_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static const fsm_node dev_fsm[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static const int DEV_FSM_LEN = ARRAY_SIZE(dev_fsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  * Transmit a packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * This is a helper function for netiucv_tx().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * @param conn Connection to be used for sending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * @param skb Pointer to struct sk_buff of packet to send.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  *            The linklevel header has already been set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  *            by netiucv_tx().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)  * @return 0 on success, -ERRNO (e.g. -EBUSY or -ENOMEM) on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int netiucv_transmit_skb(struct iucv_connection *conn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct iucv_message msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	unsigned long saveflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	struct ll_header header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		int l = skb->len + NETIUCV_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		spin_lock_irqsave(&conn->collect_lock, saveflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (conn->collect_len + l >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			IUCV_DBF_TEXT(data, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				      "EBUSY from netiucv_transmit_skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			refcount_inc(&skb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			skb_queue_tail(&conn->collect_queue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			conn->collect_len += l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		struct sk_buff *nskb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		 * Copy the skb to a newly allocated skb in lowmem only if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		 * data is located above 2G in memory or tailroom is < 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				    NETIUCV_HDRLEN)) >> 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		int copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		if (hi || (skb_tailroom(skb) < 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			if (!nskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 				skb_reserve(nskb, NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				skb_put_data(nskb, skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			copied = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		 * skb now is below 2G and has enough room. Add headers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		header.next = nskb->len + NETIUCV_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		header.next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		fsm_newstate(conn->fsm, CONN_STATE_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		conn->prof.send_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		msg.tag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		msg.class = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		rc = iucv_message_send(conn->path, &msg, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 				       nskb->data, nskb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		conn->prof.doios_single++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		conn->prof.txlen += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		conn->prof.tx_pending++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			conn->prof.tx_max_pending = conn->prof.tx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 			struct netiucv_priv *privptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			conn->prof.tx_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			privptr = netdev_priv(conn->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			if (privptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 				privptr->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			if (copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 				dev_kfree_skb(nskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				 * Remove our headers. They get added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				 * again on retransmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 				skb_pull(skb, NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			if (copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 				dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			refcount_inc(&nskb->users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			skb_queue_tail(&conn->commit_queue, nskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  * Interface API for upper network layers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  * Open an interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  * Called from generic network layer when ifconfig up is run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  * @param dev Pointer to interface struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  * @return 0 on success, -ERRNO on failure. (Never fails.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static int netiucv_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct netiucv_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)  * Close an interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)  * Called from generic network layer when ifconfig down is run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * @param dev Pointer to interface struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * @return 0 on success, -ERRNO on failure. (Never fails.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static int netiucv_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	struct netiucv_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  * Start transmission of a packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  * Called from generic network device layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  * @param skb Pointer to buffer containing the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * @param dev Pointer to interface struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  * @return 0 if packet consumed, !0 if packet rejected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  *         Note: If we return !0 (NETDEV_TX_BUSY), the packet is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  *               freed here; the generic network layer retries it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	struct netiucv_priv *privptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	 * Some sanity checks ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		privptr->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		IUCV_DBF_TEXT(data, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		privptr->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	 * If connection is not running, try to restart it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	 * and throw away packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		privptr->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		privptr->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		privptr->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (netiucv_test_and_set_busy(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	rc = netiucv_transmit_skb(privptr->conn, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	netiucv_clear_busy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  * netiucv_stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  * @dev: Pointer to interface struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * Returns interface statistics of a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * Returns pointer to stats struct of this interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) static struct net_device_stats *netiucv_stats (struct net_device * dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	struct netiucv_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	return &priv->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  * attributes in sysfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
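/* Show the peer (user id and user data) configured for this connection. */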
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static ssize_t user_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			 char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
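/*
 * Parse and validate a peer specification of the form "username" or
 * "username.userdata": the user id (up to 8 alphanumeric or '$'
 * characters) is uppercased and blank-padded, the optional user data
 * (up to 16 characters) is uppercased, blank-padded and converted to
 * EBCDIC; without a '.' the default iucvMagic user data is used.
 * Returns 0 on success or -EINVAL on malformed input.
 */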
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) static int netiucv_check_user(const char *buf, size_t count, char *username,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			      char *userdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	const char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	p = strchr(buf, '.');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	if ((p && ((count > 26) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		   ((p - buf) > 8) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		   (buf + count - p > 18))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	    (!p && (count > 9))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		if (isalnum(*p) || *p == '$') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			username[i] = toupper(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		if (*p == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			/* trailing lf, grr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		IUCV_DBF_TEXT_(setup, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			       "conn_write: invalid character %02x\n", *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	while (i < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		username[i++] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	username[8] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	if (*p == '.') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		for (i = 0; i < 16 && *p; i++, p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			if (*p == '\n')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			userdata[i] = toupper(*p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		while (i > 0 && i < 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			userdata[i++] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		memcpy(userdata, iucvMagic_ascii, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	userdata[16] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	ASCEBC(userdata, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
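/*
 * Set the peer for this connection. A new user id is rejected while the
 * interface is active (-EPERM), and a peer that is already used by
 * another netiucv connection is rejected (-EEXIST). Example (the sysfs
 * path below is illustrative and depends on the device name):
 *   echo "PEERID.USERDATA" > /sys/bus/iucv/devices/netiucv0/user
 */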
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static ssize_t user_write(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			  const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct net_device *ndev = priv->conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	char	username[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	char	userdata[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	int	rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	struct iucv_connection *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	rc = netiucv_check_user(buf, count, username, userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	if (memcmp(username, priv->conn->userid, 9) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		/* username changed while the interface is active. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	read_lock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	list_for_each_entry(cp, &iucv_connection_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		if (!strncmp(username, cp->userid, 9) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 			read_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 				"already exists\n", netiucv_printuser(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 			return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	read_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	memcpy(priv->conn->userid, username, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	memcpy(priv->conn->userdata, userdata, 17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static DEVICE_ATTR(user, 0644, user_show, user_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
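/*
 * Set the maximum IUCV buffer size for this connection. The value is
 * rejected if it exceeds NETIUCV_BUFSIZE_MAX, if it is too small for the
 * current MTU while the interface is running, or if it is smaller than
 * 576 + 2 * NETIUCV_HDRLEN. While the interface is down the MTU is
 * adjusted to the new buffer size as well.
 */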
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	struct net_device *ndev = priv->conn->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	unsigned int bs1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (count >= 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	rc = kstrtouint(buf, 0, &bs1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (rc == -EINVAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		IUCV_DBF_TEXT_(setup, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			"buffer_write: buffer size %d too large\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			bs1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if ((ndev->flags & IFF_RUNNING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		IUCV_DBF_TEXT_(setup, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 			"buffer_write: buffer size %d too small\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			bs1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		IUCV_DBF_TEXT_(setup, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			"buffer_write: buffer size %d too small\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			bs1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	priv->conn->max_buffsize = bs1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	if (!(ndev->flags & IFF_RUNNING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 			     char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) static ssize_t conn_fsm_show (struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
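/*
 * The following attributes expose the transmit statistics collected in
 * conn->prof; writing to one of them resets the corresponding counter.
 */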
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static ssize_t maxmulti_show (struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) static ssize_t maxmulti_write (struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			       struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			       const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	priv->conn->prof.maxmulti = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			   char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			    const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	priv->conn->prof.maxcqueue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			   char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 			    const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	priv->conn->prof.doios_single = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 			   char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			    const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	priv->conn->prof.doios_multi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			   char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 			    const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	priv->conn->prof.txlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 			     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	priv->conn->prof.tx_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	priv->conn->prof.tx_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			    char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	IUCV_DBF_TEXT(trace, 5, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	struct netiucv_priv *priv = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	IUCV_DBF_TEXT(trace, 4, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	priv->conn->prof.tx_max_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static struct attribute *netiucv_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	&dev_attr_buffer.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	&dev_attr_user.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static struct attribute_group netiucv_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	.attrs = netiucv_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static struct attribute *netiucv_stat_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	&dev_attr_device_fsm_state.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	&dev_attr_connection_fsm_state.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	&dev_attr_max_tx_buffer_used.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	&dev_attr_max_chained_skbs.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	&dev_attr_tx_single_write_ops.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	&dev_attr_tx_multi_write_ops.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	&dev_attr_netto_bytes.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	&dev_attr_max_tx_io_time.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	&dev_attr_tx_pending.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	&dev_attr_tx_max_pending.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) static struct attribute_group netiucv_stat_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	.name  = "stats",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	.attrs = netiucv_stat_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) static const struct attribute_group *netiucv_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	&netiucv_stat_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	&netiucv_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) };
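/*
 * Because netiucv_stat_attr_group has .name = "stats", its attributes are
 * created in a "stats" subdirectory of the per-connection device, while the
 * unnamed group ("buffer" and "user") appears at the device's top level.
 * A typical layout (assumed path; the device name follows from
 * dev_set_name() in netiucv_register_device() below) would be:
 *
 *	/sys/bus/iucv/devices/netiucv0/user
 *	/sys/bus/iucv/devices/netiucv0/buffer
 *	/sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */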
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) static int netiucv_register_device(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	struct netiucv_priv *priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		dev_set_name(dev, "net%s", ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		dev->bus = &iucv_bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		dev->parent = iucv_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		dev->groups = netiucv_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		 * The release function could be called after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		 * module has been unloaded. Its _only_ task is to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		 * free the struct. Therefore, we specify kfree()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		 * directly here. (Probably a little bit obfuscated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		 * but legitimate ...).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		 */
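		/*
		 * For illustration only: the conventional alternative would
		 * be a dedicated release callback such as the hypothetical
		 *
		 *	static void netiucv_dev_release(struct device *dev)
		 *	{
		 *		kfree(dev);
		 *	}
		 *
		 * but a function living in this module's text would become a
		 * dangling pointer once the module is unloaded, which is why
		 * kfree() itself is assigned below.
		 */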
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		dev->release = (void (*)(struct device *))kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		dev->driver = &netiucv_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	ret = device_register(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		put_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	priv->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	dev_set_drvdata(dev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static void netiucv_unregister_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	device_unregister(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * Allocate and initialize a new connection structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  * Add it to the list of netiucv connections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 						      char *username,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 						      char *userdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	struct iucv_connection *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	if (!conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	skb_queue_head_init(&conn->collect_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	skb_queue_head_init(&conn->commit_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	spin_lock_init(&conn->collect_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	conn->netdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (!conn->rx_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		goto out_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	if (!conn->tx_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		goto out_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	conn->fsm = init_fsm("netiucvconn", conn_state_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			     conn_event_names, NR_CONN_STATES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	if (!conn->fsm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		goto out_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	fsm_settimer(conn->fsm, &conn->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	if (userdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		memcpy(conn->userdata, userdata, 17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	if (username) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		memcpy(conn->userid, username, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	write_lock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	list_add_tail(&conn->list, &iucv_connection_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	write_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	return conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) out_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	kfree_skb(conn->tx_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) out_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	kfree_skb(conn->rx_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) out_conn:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	kfree(conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * Release a connection structure and remove it from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  * list of netiucv connections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static void netiucv_remove_connection(struct iucv_connection *conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	write_lock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	list_del_init(&conn->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	write_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	fsm_deltimer(&conn->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	netiucv_purge_skb_queue(&conn->collect_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (conn->path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		iucv_path_sever(conn->path, conn->userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		kfree(conn->path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		conn->path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	netiucv_purge_skb_queue(&conn->commit_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	kfree_fsm(conn->fsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	kfree_skb(conn->rx_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	kfree_skb(conn->tx_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  * Release all resources held by a net device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) static void netiucv_free_netdevice(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	struct netiucv_priv *privptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (privptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		if (privptr->conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			netiucv_remove_connection(privptr->conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		if (privptr->fsm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			kfree_fsm(privptr->fsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		privptr->conn = NULL; privptr->fsm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		/* privptr gets freed by free_netdev() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)  * Initialize a net device (called by the kernel from alloc_netdev()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static const struct net_device_ops netiucv_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	.ndo_open		= netiucv_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	.ndo_stop		= netiucv_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	.ndo_get_stats		= netiucv_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	.ndo_start_xmit		= netiucv_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) static void netiucv_setup_netdevice(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	dev->min_mtu		 = 576;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	dev->max_mtu		 = NETIUCV_MTU_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	dev->needs_free_netdev   = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	dev->priv_destructor     = netiucv_free_netdevice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	dev->hard_header_len     = NETIUCV_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	dev->addr_len            = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	dev->type                = ARPHRD_SLIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	dev->netdev_ops		 = &netiucv_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  * Allocate and initialize everything for a net device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)  */
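/*
 * Note on locking: rtnl_lock() is taken before dev_alloc_name() and, on
 * success, is deliberately left held when this function returns; the caller,
 * connection_store(), drops it only after register_netdevice().  The
 * out_fsm/out_netdev error paths release it before returning NULL.
 */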
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	struct netiucv_priv *privptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (dev_alloc_name(dev, dev->name) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		goto out_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	privptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	if (!privptr->fsm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		goto out_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	privptr->conn = netiucv_new_connection(dev, username, userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	if (!privptr->conn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		goto out_fsm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) out_fsm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	kfree_fsm(privptr->fsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) out_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) static ssize_t connection_store(struct device_driver *drv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 				size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	char username[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	char userdata[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	struct netiucv_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	struct iucv_connection *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	rc = netiucv_check_user(buf, count, username, userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	read_lock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	list_for_each_entry(cp, &iucv_connection_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		if (!strncmp(username, cp->userid, 9) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		    !strncmp(userdata, cp->userdata, 17)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			read_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 				"already exists\n", netiucv_printuser(cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	read_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	dev = netiucv_init_netdevice(username, userdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	rc = netiucv_register_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		IUCV_DBF_TEXT_(setup, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			"ret %d from netiucv_register_device\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		goto out_free_ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	/* sysfs magic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	SET_NETDEV_DEV(dev, priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	rc = register_netdevice(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		goto out_unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	dev_info(priv->dev, "The IUCV interface to %s has been established "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			    "successfully\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		netiucv_printuser(priv->conn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) out_unreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	netiucv_unregister_device(priv->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) out_free_ndev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	netiucv_free_netdevice(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) static DRIVER_ATTR_WO(connection);
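/*
 * Sketch of typical usage (path assumed, on the premise that the driver
 * registers on the iucv bus under the name "netiucv"):
 *
 *	echo "PEERUSER" > /sys/bus/iucv/drivers/netiucv/connection
 *
 * creates a new iucv%d interface whose IUCV peer is the z/VM guest
 * PEERUSER, as implemented by connection_store() above.
 */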
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) static ssize_t remove_store(struct device_driver *drv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			    size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	struct iucv_connection *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	struct netiucv_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	char name[IFNAMSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	const char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	if (count >= IFNAMSIZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		count = IFNAMSIZ - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	for (i = 0, p = buf; i < count && *p; i++, p++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		if (*p == '\n' || *p == ' ')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 			/* trailing lf, grr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		name[i] = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	name[i] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	read_lock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	list_for_each_entry(cp, &iucv_connection_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		ndev = cp->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		if (strncmp(name, ndev->name, count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		read_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			dev_warn(dev, "The IUCV device is connected"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 				" to %s and cannot be removed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 				priv->conn->userid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 			return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		netiucv_unregister_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	read_unlock_bh(&iucv_connection_rwlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) static DRIVER_ATTR_WO(remove);
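/*
 * Counterpart usage sketch (same assumed sysfs path as above): writing an
 * interface name, e.g.
 *
 *	echo "iucv0" > /sys/bus/iucv/drivers/netiucv/remove
 *
 * tears the interface down again, provided it is not currently up
 * (remove_store() returns -EPERM for active interfaces).
 */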
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static struct attribute *netiucv_drv_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	&driver_attr_connection.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	&driver_attr_remove.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) static struct attribute_group netiucv_drv_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	.attrs = netiucv_drv_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static const struct attribute_group *netiucv_drv_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	&netiucv_drv_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static void netiucv_banner(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	pr_info("driver initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) static void __exit netiucv_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	struct iucv_connection *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	struct netiucv_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	while (!list_empty(&iucv_connection_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		cp = list_entry(iucv_connection_list.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 				struct iucv_connection, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		ndev = cp->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		priv = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		dev = priv->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		netiucv_unregister_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	driver_unregister(&netiucv_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	iucv_unregister(&netiucv_handler, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	iucv_unregister_dbf_views();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	pr_info("driver unloaded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) static int __init netiucv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	rc = iucv_register_dbf_views();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	rc = iucv_register(&netiucv_handler, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		goto out_dbf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	IUCV_DBF_TEXT(trace, 3, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	netiucv_driver.groups = netiucv_drv_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	rc = driver_register(&netiucv_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		goto out_iucv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	netiucv_banner();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) out_iucv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	iucv_unregister(&netiucv_handler, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) out_dbf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	iucv_unregister_dbf_views();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) module_init(netiucv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) module_exit(netiucv_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) MODULE_LICENSE("GPL");