Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
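The source below is the S/390 Lan Channel Station (LCS) network driver, lcs.c, as carried in this tree.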

// SPDX-License-Identifier: GPL-2.0+
/*
 *  Linux for S/390 Lan Channel Station Network Driver
 *
 *  Copyright IBM Corp. 1999, 2009
 *  Author(s): Original Code written by
 *			DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
 *	       Rewritten by
 *			Frank Pavlic <fpavlic@de.ibm.com> and
 *			Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#define KMSG_COMPONENT		"lcs"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ip.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/timex.h>
#include <linux/device.h>
#include <asm/ccwgroup.h>

#include "lcs.h"

#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI)
#error Cannot compile lcs.c without some net devices switched on.
#endif

/**
 * initialization string for output
 */

static char version[] __initdata = "LCS driver";

/**
 * the root device for lcs group devices
 */
static struct device *lcs_root_dev;

/**
 * Some prototypes.
 */
static void lcs_tasklet(unsigned long);
static void lcs_start_kernel_thread(struct work_struct *);
static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
#ifdef CONFIG_IP_MULTICAST
static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
#endif /* CONFIG_IP_MULTICAST */
static int lcs_recovery(void *ptr);

/**
 * Debug Facility Stuff
 */
static char debug_buffer[255];
static debug_info_t *lcs_dbf_setup;
static debug_info_t *lcs_dbf_trace;

/**
 *  LCS Debug Facility functions
 */
static void
lcs_unregister_debug_facility(void)
{
	debug_unregister(lcs_dbf_setup);
	debug_unregister(lcs_dbf_trace);
}

static int
lcs_register_debug_facility(void)
{
	lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
	lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
		pr_err("Not enough memory for debug facility.\n");
		lcs_unregister_debug_facility();
		return -ENOMEM;
	}
	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(lcs_dbf_setup, 2);
	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(lcs_dbf_trace, 2);
	return 0;
}

/**
 * Allocate io buffers.
 */
static int
lcs_alloc_channel(struct lcs_channel *channel)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ichalloc");
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		/* alloc memory for io buffer */
		channel->iob[cnt].data =
			kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
	}
	if (cnt < LCS_NUM_BUFFS) {
		/* Not all io buffers could be allocated. */
		LCS_DBF_TEXT(2, setup, "echalloc");
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	return 0;
}

/**
 * Free io buffers.
 */
static void
lcs_free_channel(struct lcs_channel *channel)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ichfree");
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		kfree(channel->iob[cnt].data);
		channel->iob[cnt].data = NULL;
	}
}

/*
 * Cleanup channel.
 */
static void
lcs_cleanup_channel(struct lcs_channel *channel)
{
	LCS_DBF_TEXT(3, setup, "cleanch");
	/* Kill write channel tasklets. */
	tasklet_kill(&channel->irq_tasklet);
	/* Free channel buffers. */
	lcs_free_channel(channel);
}

/**
 * Free memory for card and channels.
 */
static void
lcs_free_card(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, setup, "remcard");
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
	kfree(card);
}

/**
 * Allocate memory for card and channels.
 */
static struct lcs_card *
lcs_alloc_card(void)
{
	struct lcs_card *card;
	int rc;

	LCS_DBF_TEXT(2, setup, "alloclcs");

	card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
	if (card == NULL)
		return NULL;
	card->lan_type = LCS_FRAME_TYPE_AUTO;
	card->pkt_seq = 0;
	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
	/* Allocate io buffers for the read channel. */
	rc = lcs_alloc_channel(&card->read);
	if (rc) {
		LCS_DBF_TEXT(2, setup, "iccwerr");
		lcs_free_card(card);
		return NULL;
	}
	/* Allocate io buffers for the write channel. */
	rc = lcs_alloc_channel(&card->write);
	if (rc) {
		LCS_DBF_TEXT(2, setup, "iccwerr");
		lcs_cleanup_channel(&card->read);
		lcs_free_card(card);
		return NULL;
	}

#ifdef CONFIG_IP_MULTICAST
	INIT_LIST_HEAD(&card->ipm_list);
#endif
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
	return card;
}

/*
 * Setup read channel.
 */
static void
lcs_setup_read_ccws(struct lcs_card *card)
{
	int cnt;

	LCS_DBF_TEXT(2, setup, "ireadccw");
	/* Setup read ccws. */
	memset(card->read.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
		card->read.ccws[cnt].flags =
			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
		/*
		 * Note: we have allocated the buffer with GFP_DMA, so
		 * we do not need to do set_normalized_cda.
		 */
		card->read.ccws[cnt].cda =
			(__u32) __pa(card->read.iob[cnt].data);
		((struct lcs_header *)
		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
		card->read.iob[cnt].callback = lcs_get_frames_cb;
		card->read.iob[cnt].state = LCS_BUF_STATE_READY;
		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
	}
	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
	/* Last ccw is a tic (transfer in channel). */
	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
	card->read.ccws[LCS_NUM_BUFFS].cda =
		(__u32) __pa(card->read.ccws);
	/* Set initial state of the read channel. */
	card->read.state = LCS_CH_STATE_INIT;

	card->read.io_idx = 0;
	card->read.buf_idx = 0;
}

static void
lcs_setup_read(struct lcs_card *card)
{
	LCS_DBF_TEXT(3, setup, "initread");

	lcs_setup_read_ccws(card);
	/* Initialize read channel tasklet. */
	card->read.irq_tasklet.data = (unsigned long) &card->read;
	card->read.irq_tasklet.func = lcs_tasklet;
	/* Initialize waitqueue. */
	init_waitqueue_head(&card->read.wait_q);
}

/*
 * Setup write channel.
 */
static void
lcs_setup_write_ccws(struct lcs_card *card)
{
	int cnt;

	LCS_DBF_TEXT(3, setup, "iwritccw");
	/* Setup write ccws. */
	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
		card->write.ccws[cnt].count = 0;
		card->write.ccws[cnt].flags =
			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
		/*
		 * Note: we have allocated the buffer with GFP_DMA, so
		 * we do not need to do set_normalized_cda.
		 */
		card->write.ccws[cnt].cda =
			(__u32) __pa(card->write.iob[cnt].data);
	}
	/* Last ccw is a tic (transfer in channel). */
	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
	card->write.ccws[LCS_NUM_BUFFS].cda =
		(__u32) __pa(card->write.ccws);
	/* Set initial state of the write channel. */
	card->write.state = LCS_CH_STATE_INIT;

	card->write.io_idx = 0;
	card->write.buf_idx = 0;
}

static void
lcs_setup_write(struct lcs_card *card)
{
	LCS_DBF_TEXT(3, setup, "initwrit");

	lcs_setup_write_ccws(card);
	/* Initialize write channel tasklet. */
	card->write.irq_tasklet.data = (unsigned long) &card->write;
	card->write.irq_tasklet.func = lcs_tasklet;
	/* Initialize waitqueue. */
	init_waitqueue_head(&card->write.wait_q);
}

/*
 * Bookkeeping for the kernel threads (e.g. recovery) that may run on
 * behalf of a card: the allowed/start/running bit masks are protected
 * by card->mask_lock.
 */
static void
lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
{
	unsigned long flags;

	spin_lock_irqsave(&card->mask_lock, flags);
	card->thread_allowed_mask = threads;
	spin_unlock_irqrestore(&card->mask_lock, flags);
	wake_up(&card->wait_q);
}

static int lcs_threads_running(struct lcs_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->mask_lock, flags);
	return rc;
}

static int
lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
{
	return wait_event_interruptible(card->wait_q,
			lcs_threads_running(card, threads) == 0);
}

static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	    (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->mask_lock, flags);
	return 0;
}

static void
lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->mask_lock, flags);
	wake_up(&card->wait_q);
}

static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->mask_lock, flags);
	return rc;
}

static int
lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __lcs_do_run_thread(card, thread)) >= 0);
	return rc;
}

static int
lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->mask_lock, flags);
	LCS_DBF_TEXT_(4, trace, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->mask_lock, flags);
	return rc;
}

/**
 * Initialize channels, card and state machines.
 */
static void
lcs_setup_card(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, setup, "initcard");
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));

	lcs_setup_read(card);
	lcs_setup_write(card);
	/* Set card's initial state. */
	card->state = DEV_STATE_DOWN;
	card->tx_buffer = NULL;
	card->tx_emitted = 0;

	init_waitqueue_head(&card->wait_q);
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ipm_lock);
	spin_lock_init(&card->mask_lock);
#ifdef CONFIG_IP_MULTICAST
	INIT_LIST_HEAD(&card->ipm_list);
#endif
	INIT_LIST_HEAD(&card->lancmd_waiters);
}

static void lcs_clear_multicast_list(struct lcs_card *card)
{
#ifdef CONFIG_IP_MULTICAST
	struct lcs_ipm_list *ipm;
	unsigned long flags;

	/* Free multicast list. */
	LCS_DBF_TEXT(3, setup, "clmclist");
	spin_lock_irqsave(&card->ipm_lock, flags);
	while (!list_empty(&card->ipm_list)) {
		ipm = list_entry(card->ipm_list.next,
				 struct lcs_ipm_list, list);
		list_del(&ipm->list);
		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED) {
			spin_unlock_irqrestore(&card->ipm_lock, flags);
			lcs_send_delipm(card, ipm);
			spin_lock_irqsave(&card->ipm_lock, flags);
		}
		kfree(ipm);
	}
	spin_unlock_irqrestore(&card->ipm_lock, flags);
#endif
}

/**
 * Cleanup channels, card and state machines.
 */
static void
lcs_cleanup_card(struct lcs_card *card)
{
	LCS_DBF_TEXT(3, setup, "cleancrd");
	LCS_DBF_HEX(2, setup, &card, sizeof(void*));

	if (card->dev != NULL)
		free_netdev(card->dev);
	/* Cleanup channels. */
	lcs_cleanup_channel(&card->write);
	lcs_cleanup_channel(&card->read);
}

/**
 * Start channel.
 */
static int
lcs_start_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT_(4, trace, "ssch%s", dev_name(&channel->ccwdev->dev));
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      channel->ccws + channel->io_idx, 0, 0,
			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
	if (rc == 0)
		channel->state = LCS_CH_STATE_RUNNING;
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "essh%s",
			      dev_name(&channel->ccwdev->dev));
		dev_err(&channel->ccwdev->dev,
			"Starting an LCS device resulted in an error,"
			" rc=%d!\n", rc);
	}
	return rc;
}

static int
lcs_clear_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	LCS_DBF_TEXT(4, trace, "clearch");
	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ecsc%s",
			      dev_name(&channel->ccwdev->dev));
		return rc;
	}
	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
	channel->state = LCS_CH_STATE_STOPPED;
	return rc;
}

/**
 * Stop channel.
 */
static int
lcs_stop_channel(struct lcs_channel *channel)
{
	unsigned long flags;
	int rc;

	if (channel->state == LCS_CH_STATE_STOPPED)
		return 0;
	LCS_DBF_TEXT(4, trace, "haltsch");
	LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
	channel->state = LCS_CH_STATE_INIT;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ehsc%s",
			      dev_name(&channel->ccwdev->dev));
		return rc;
	}
	/* Asynchronous halt initiated. Wait for its completion. */
	wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
	lcs_clear_channel(channel);
	return 0;
}

/**
 * Start read and write channels.
 */
static int
lcs_start_channels(struct lcs_card *card)
{
	int rc;

	LCS_DBF_TEXT(2, trace, "chstart");
	/* start read channel */
	rc = lcs_start_channel(&card->read);
	if (rc)
		return rc;
	/* start write channel */
	rc = lcs_start_channel(&card->write);
	if (rc)
		lcs_stop_channel(&card->read);
	return rc;
}

/**
 * Stop read and write channels.
 */
static int
lcs_stop_channels(struct lcs_card *card)
{
	LCS_DBF_TEXT(2, trace, "chhalt");
	lcs_stop_channel(&card->read);
	lcs_stop_channel(&card->write);
	return 0;
}

/**
 * Get empty buffer.
 */
static struct lcs_buffer *
__lcs_get_buffer(struct lcs_channel *channel)
{
	int index;

	LCS_DBF_TEXT(5, trace, "_getbuff");
	index = channel->io_idx;
	do {
		if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
			channel->iob[index].state = LCS_BUF_STATE_LOCKED;
			return channel->iob + index;
		}
		index = (index + 1) & (LCS_NUM_BUFFS - 1);
	} while (index != channel->io_idx);
	return NULL;
}

static struct lcs_buffer *
lcs_get_buffer(struct lcs_channel *channel)
{
	struct lcs_buffer *buffer;
	unsigned long flags;

	LCS_DBF_TEXT(5, trace, "getbuff");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer = __lcs_get_buffer(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	return buffer;
}

/**
 * Resume channel program if the channel is suspended.
 */
static int
__lcs_resume_channel(struct lcs_channel *channel)
{
	int rc;

	if (channel->state != LCS_CH_STATE_SUSPENDED)
		return 0;
	if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
		return 0;
	LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
	rc = ccw_device_resume(channel->ccwdev);
	if (rc) {
		LCS_DBF_TEXT_(4, trace, "ersc%s",
			      dev_name(&channel->ccwdev->dev));
		dev_err(&channel->ccwdev->dev,
			"Sending data from the LCS device to the LAN failed"
			" with rc=%d\n", rc);
	} else
		channel->state = LCS_CH_STATE_RUNNING;
	return rc;
}

/**
 * Make a buffer ready for processing.
 */
static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
{
	int prev, next;

	LCS_DBF_TEXT(5, trace, "rdybits");
	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
	next = (index + 1) & (LCS_NUM_BUFFS - 1);
	/* Check if we may clear the suspend bit of this buffer. */
	if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
		/* Check if we have to set the PCI bit. */
		if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
			/* Suspend bit of the previous buffer is not set. */
			channel->ccws[index].flags |= CCW_FLAG_PCI;
		/* Suspend bit of the next buffer is set. */
		channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
	}
}

static int
lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	unsigned long flags;
	int index, rc;

	LCS_DBF_TEXT(5, trace, "rdybuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
	       buffer->state != LCS_BUF_STATE_PROCESSED);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer->state = LCS_BUF_STATE_READY;
	index = buffer - channel->iob;
	/* Set length. */
	channel->ccws[index].count = buffer->count;
	/* Check relevant PCI/suspend bits. */
	__lcs_ready_buffer_bits(channel, index);
	rc = __lcs_resume_channel(channel);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	return rc;
}

/**
 * Mark the buffer as processed. Take care of the suspend bit
 * of the previous buffer. This function is called from
 * interrupt context, so the lock must not be taken.
 */
static int
__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	int index, prev, next;

	LCS_DBF_TEXT(5, trace, "prcsbuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_READY);
	buffer->state = LCS_BUF_STATE_PROCESSED;
	index = buffer - channel->iob;
	prev = (index - 1) & (LCS_NUM_BUFFS - 1);
	next = (index + 1) & (LCS_NUM_BUFFS - 1);
	/* Set the suspend bit and clear the PCI bit of this buffer. */
	channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
	channel->ccws[index].flags &= ~CCW_FLAG_PCI;
	/* Check the suspend bit of the previous buffer. */
	if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
		/*
		 * Previous buffer is in state ready. It might have
		 * happened in lcs_ready_buffer that the suspend bit
		 * has not been cleared to avoid an endless loop.
		 * Do it now.
		 */
		__lcs_ready_buffer_bits(channel, prev);
	}
	/* Clear PCI bit of next buffer. */
	channel->ccws[next].flags &= ~CCW_FLAG_PCI;
	return __lcs_resume_channel(channel);
}

/**
 * Put a processed buffer back to state empty.
 */
static void
lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
	unsigned long flags;

	LCS_DBF_TEXT(5, trace, "relbuff");
	BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
	       buffer->state != LCS_BUF_STATE_PROCESSED);
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	buffer->state = LCS_BUF_STATE_EMPTY;
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}

/**
 * Get buffer for a lan command.
 */
static struct lcs_buffer *
lcs_get_lancmd(struct lcs_card *card, int count)
{
	struct lcs_buffer *buffer;
	struct lcs_cmd *cmd;

	LCS_DBF_TEXT(4, trace, "getlncmd");
	/* Get buffer and wait if none is available. */
	wait_event(card->write.wait_q,
		   ((buffer = lcs_get_buffer(&card->write)) != NULL));
	count += sizeof(struct lcs_header);
	*(__u16 *)(buffer->data + count) = 0;
	buffer->count = count + sizeof(__u16);
	buffer->callback = lcs_release_buffer;
	cmd = (struct lcs_cmd *) buffer->data;
	cmd->offset = count;
	cmd->type = LCS_FRAME_TYPE_CONTROL;
	cmd->slot = 0;
	return buffer;
}

static void
lcs_get_reply(struct lcs_reply *reply)
{
	refcount_inc(&reply->refcnt);
}

static void
lcs_put_reply(struct lcs_reply *reply)
{
	if (refcount_dec_and_test(&reply->refcnt))
		kfree(reply);
}

static struct lcs_reply *
lcs_alloc_reply(struct lcs_cmd *cmd)
{
	struct lcs_reply *reply;

	LCS_DBF_TEXT(4, trace, "getreply");

	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
	if (!reply)
		return NULL;
	refcount_set(&reply->refcnt, 1);
	reply->sequence_no = cmd->sequence_no;
	reply->received = 0;
	reply->rc = 0;
	init_waitqueue_head(&reply->wait_q);

	return reply;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  * Notifier function for lancmd replies. Called from read irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	struct list_head *l, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	struct lcs_reply *reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	LCS_DBF_TEXT(4, trace, "notiwait");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	spin_lock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	list_for_each_safe(l, n, &card->lancmd_waiters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		reply = list_entry(l, struct lcs_reply, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		if (reply->sequence_no == cmd->sequence_no) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			lcs_get_reply(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			list_del_init(&reply->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			if (reply->callback != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 				reply->callback(card, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			reply->received = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 			reply->rc = cmd->return_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			wake_up(&reply->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			lcs_put_reply(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	spin_unlock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
/**
 * Timeout handler for lan commands: if no reply arrived in time, take the
 * waiter off the list and wake up the sender with -ETIME.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) lcs_lancmd_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct lcs_reply *reply = from_timer(reply, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct lcs_reply *list_reply, *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	LCS_DBF_TEXT(4, trace, "timeout");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	spin_lock_irqsave(&reply->card->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	list_for_each_entry_safe(list_reply, r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 				 &reply->card->lancmd_waiters,list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		if (reply == list_reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			lcs_get_reply(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			list_del_init(&reply->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			spin_unlock_irqrestore(&reply->card->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			reply->received = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			reply->rc = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			wake_up(&reply->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			lcs_put_reply(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	spin_unlock_irqrestore(&reply->card->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
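/**
 * Send a lan command buffer on the write channel and wait for the matching
 * reply or a timeout.  Returns 0 on success and a negative errno otherwise;
 * a non-zero LAN return code (including a timeout) is mapped to -EIO.
 */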
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct lcs_reply *reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	LCS_DBF_TEXT(4, trace, "sendcmd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	cmd->return_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	cmd->sequence_no = card->sequence_no++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	reply = lcs_alloc_reply(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (!reply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	reply->callback = reply_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	reply->card = card;
	spin_lock_irqsave(&card->lock, flags);
	list_add_tail(&reply->list, &card->lancmd_waiters);
	spin_unlock_irqrestore(&card->lock, flags);

	buffer->callback = lcs_release_buffer;
	rc = lcs_ready_buffer(&card->write, buffer);
	if (rc) {
		/* Take the unanswered waiter off the list again so the
		 * reply structure is not leaked on the error path. */
		spin_lock_irqsave(&card->lock, flags);
		list_del_init(&reply->list);
		spin_unlock_irqrestore(&card->lock, flags);
		lcs_put_reply(reply);
		return rc;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	timer_setup(&reply->timer, lcs_lancmd_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	wait_event(reply->wait_q, reply->received);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	del_timer_sync(&reply->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	rc = reply->rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	lcs_put_reply(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	return rc ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * LCS startup command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) lcs_send_startup(struct lcs_card *card, __u8 initiator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	LCS_DBF_TEXT(2, trace, "startup");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	cmd->cmd_code = LCS_CMD_STARTUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	cmd->initiator = initiator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	return lcs_send_lancmd(card, buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * LCS shutdown command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) lcs_send_shutdown(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	LCS_DBF_TEXT(2, trace, "shutdown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	cmd->cmd_code = LCS_CMD_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	cmd->initiator = LCS_INITIATOR_TCPIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	return lcs_send_lancmd(card, buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  * LCS lanstat command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) __lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	LCS_DBF_TEXT(2, trace, "statcb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) lcs_send_lanstat(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	LCS_DBF_TEXT(2,trace, "cmdstat");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	/* Setup lanstat command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	cmd->cmd_code = LCS_CMD_LANSTAT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	cmd->initiator = LCS_INITIATOR_TCPIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	cmd->cmd.lcs_std_cmd.portno = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  * send stoplan command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	LCS_DBF_TEXT(2, trace, "cmdstpln");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	cmd->cmd_code = LCS_CMD_STOPLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	cmd->initiator = initiator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	cmd->cmd.lcs_std_cmd.portno = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	return lcs_send_lancmd(card, buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * send startlan command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) __lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	LCS_DBF_TEXT(2, trace, "srtlancb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	card->portno = cmd->cmd.lcs_std_cmd.portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) lcs_send_startlan(struct lcs_card *card, __u8 initiator)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	LCS_DBF_TEXT(2, trace, "cmdstaln");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	cmd->cmd_code = LCS_CMD_STARTLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	cmd->initiator = initiator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	cmd->cmd.lcs_std_cmd.portno = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) #ifdef CONFIG_IP_MULTICAST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  * send setipm command (Multicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	LCS_DBF_TEXT(2, trace, "cmdsetim");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	cmd->cmd_code = LCS_CMD_SETIPM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	cmd->initiator = LCS_INITIATOR_TCPIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	cmd->cmd.lcs_qipassist.portno = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	cmd->cmd.lcs_qipassist.version = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	return lcs_send_lancmd(card, buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  * send delipm command (Multicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	LCS_DBF_TEXT(2, trace, "cmddelim");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	cmd->cmd_code = LCS_CMD_DELIPM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	cmd->initiator = LCS_INITIATOR_TCPIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	cmd->cmd.lcs_qipassist.portno = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	cmd->cmd.lcs_qipassist.version = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	       &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	return lcs_send_lancmd(card, buffer, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * check if multicast is supported by LCS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) __lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	LCS_DBF_TEXT(2, trace, "chkmccb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	card->ip_assists_supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		cmd->cmd.lcs_qipassist.ip_assists_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	card->ip_assists_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		cmd->cmd.lcs_qipassist.ip_assists_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) lcs_check_multicast_support(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct lcs_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	struct lcs_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	LCS_DBF_TEXT(2, trace, "cmdqipa");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	/* Send query ipassist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	cmd = (struct lcs_cmd *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	cmd->cmd_code = LCS_CMD_QIPASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	cmd->initiator = LCS_INITIATOR_TCPIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	cmd->cmd.lcs_qipassist.portno = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	cmd->cmd.lcs_qipassist.version = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		pr_err("Query IPAssist failed. Assuming unsupported!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
/**
 * Walk the card's multicast list and perform the pending set/delete
 * operations on the LCS card.  Entries that could not be set are kept and
 * retried on the next run.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) lcs_fix_multicast_list(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct list_head failed_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct lcs_ipm_list *ipm, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	LCS_DBF_TEXT(4,trace, "fixipm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	INIT_LIST_HEAD(&failed_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	spin_lock_irqsave(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) list_modified:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		switch (ipm->ipm_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		case LCS_IPM_STATE_SET_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			/* del from ipm_list so no one else can tamper with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			 * this entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			list_del_init(&ipm->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			spin_unlock_irqrestore(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 			rc = lcs_send_setipm(card, ipm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			spin_lock_irqsave(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 				pr_info("Adding multicast address failed."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 					" Table possibly full!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				/* store ipm in failed list -> will be added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 				 * to ipm_list again, so a retry will be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 				 * during the next call of this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 				list_add_tail(&ipm->list, &failed_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 				ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 				/* re-insert into ipm_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 				list_add_tail(&ipm->list, &card->ipm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			goto list_modified;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		case LCS_IPM_STATE_DEL_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			list_del(&ipm->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			spin_unlock_irqrestore(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			lcs_send_delipm(card, ipm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			spin_lock_irqsave(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			kfree(ipm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			goto list_modified;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		case LCS_IPM_STATE_ON_CARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	/* re-insert all entries from the failed_list into ipm_list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	list_for_each_entry_safe(ipm, tmp, &failed_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		list_move_tail(&ipm->list, &card->ipm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	spin_unlock_irqrestore(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
/**
 * Map an IPv4 multicast address to the corresponding multicast MAC address.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	LCS_DBF_TEXT(4,trace, "getmac");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	ip_eth_mc_map(ipm, mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
/**
 * Mark entries in the card's multicast list for deletion if they are no
 * longer present in the net device's multicast list.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static void lcs_remove_mc_addresses(struct lcs_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 				    struct in_device *in4_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	struct ip_mc_list *im4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	struct list_head *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	struct lcs_ipm_list *ipm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	char buf[MAX_ADDR_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	LCS_DBF_TEXT(4, trace, "remmclst");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	spin_lock_irqsave(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	list_for_each(l, &card->ipm_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		ipm = list_entry(l, struct lcs_ipm_list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		for (im4 = rcu_dereference(in4_dev->mc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 			     (memcmp(buf, &ipm->ipm.mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 				     LCS_MAC_LENGTH) == 0) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		if (im4 == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	spin_unlock_irqrestore(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
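/**
 * Look up an existing multicast list entry matching the given IP address
 * and mapped MAC address; returns NULL if none is found.
 */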
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 						 struct ip_mc_list *im4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 						 char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct lcs_ipm_list *tmp, *ipm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	struct list_head *l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	LCS_DBF_TEXT(4, trace, "chkmcent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	spin_lock_irqsave(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	list_for_each(l, &card->ipm_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		tmp = list_entry(l, struct lcs_ipm_list, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		     (memcmp(buf, &tmp->ipm.mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			     LCS_MAC_LENGTH) == 0) ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			ipm = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	spin_unlock_irqrestore(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	return ipm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
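/**
 * Add entries to the card's multicast list for addresses that appear in the
 * net device's multicast list but are not yet known to the card.  New entries
 * are flagged LCS_IPM_STATE_SET_REQUIRED and registered with the card by
 * lcs_fix_multicast_list().
 */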
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static void lcs_set_mc_addresses(struct lcs_card *card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 				 struct in_device *in4_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct ip_mc_list *im4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	struct lcs_ipm_list *ipm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	char buf[MAX_ADDR_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	LCS_DBF_TEXT(4, trace, "setmclst");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	     im4 = rcu_dereference(im4->next_rcu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		ipm = lcs_check_addr_entry(card, im4, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		if (ipm != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			continue;	/* Address already in list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		if (ipm == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			pr_info("Not enough memory to add"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 				" new multicast entry!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		ipm->ipm.ip_addr = im4->multiaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		spin_lock_irqsave(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		list_add(&ipm->list, &card->ipm_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		spin_unlock_irqrestore(&card->ipm_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
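/**
 * Thread worker that synchronizes the card's multicast table with the net
 * device: transmission is paused, pending set/delete requests are sent to
 * the card, then the queue is woken up again if the device is still up.
 */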
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) lcs_register_mc_addresses(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	struct in_device *in4_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	card = (struct lcs_card *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	LCS_DBF_TEXT(4, trace, "regmulti");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	in4_dev = in_dev_get(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (in4_dev == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	lcs_remove_mc_addresses(card,in4_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	lcs_set_mc_addresses(card, in4_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	in_dev_put(in4_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	netif_carrier_off(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	netif_tx_disable(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	wait_event(card->write.wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			(card->write.state != LCS_CH_STATE_RUNNING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	lcs_fix_multicast_list(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (card->state == DEV_STATE_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		netif_carrier_on(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		netif_wake_queue(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) #endif /* CONFIG_IP_MULTICAST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
/**
 * Called by the network stack whenever the multicast address list of the
 * net device changes; defers the actual work to the set-multicast thread.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) lcs_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) #ifdef CONFIG_IP_MULTICAST
	struct lcs_card *card;

	LCS_DBF_TEXT(4, trace, "setmulti");
	card = (struct lcs_card *) dev->ml_priv;

	if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		schedule_work(&card->kernel_thread_starter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) #endif /* CONFIG_IP_MULTICAST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
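/**
 * Check the irb pointer passed to the interrupt handler for error codes
 * from the common I/O layer.
 */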
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	if (!IS_ERR(irb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	switch (PTR_ERR(irb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	case -EIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		dev_warn(&cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			"An I/O-error occurred on the LCS device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		LCS_DBF_TEXT(2, trace, "ckirberr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		LCS_DBF_TEXT_(2, trace, "  rc%d", -EIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	case -ETIMEDOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		dev_warn(&cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			"A command timed out on the LCS device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		LCS_DBF_TEXT(2, trace, "ckirberr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		LCS_DBF_TEXT_(2, trace, "  rc%d", -ETIMEDOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		dev_warn(&cdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			"An error occurred on the LCS device, rc=%ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 			PTR_ERR(irb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		LCS_DBF_TEXT(2, trace, "ckirberr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		LCS_DBF_TEXT(2, trace, "  rc???");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	return PTR_ERR(irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
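/**
 * Evaluate channel status, device status and sense data.  Returns 1 if the
 * error condition requires channel recovery, 0 otherwise.
 */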
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	int dstat, cstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	char *sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	sense = (char *) irb->ecw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	cstat = irb->scsw.cmd.cstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	dstat = irb->scsw.cmd.dstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		     SCHN_STAT_PROT_CHECK   | SCHN_STAT_PROG_CHECK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		LCS_DBF_TEXT(2, trace, "CGENCHK");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	if (dstat & DEV_STAT_UNIT_CHECK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		if (sense[LCS_SENSE_BYTE_1] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		    LCS_SENSE_RESETTING_EVENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			LCS_DBF_TEXT(2, trace, "REVIND");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		if (sense[LCS_SENSE_BYTE_0] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		    LCS_SENSE_CMD_REJECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			LCS_DBF_TEXT(2, trace, "CMDREJ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		if ((!sense[LCS_SENSE_BYTE_0]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		    (!sense[LCS_SENSE_BYTE_1]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		    (!sense[LCS_SENSE_BYTE_2]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		    (!sense[LCS_SENSE_BYTE_3])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			LCS_DBF_TEXT(2, trace, "ZEROSEN");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		LCS_DBF_TEXT(2, trace, "DGENCHK");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
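/**
 * Kick off the recovery thread.
 */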
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) lcs_schedule_recovery(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	LCS_DBF_TEXT(2, trace, "startrec");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		schedule_work(&card->kernel_thread_starter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)  * IRQ Handler for LCS channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	struct lcs_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	int rc, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	int cstat, dstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (lcs_check_irb_error(cdev, irb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	card = CARD_FROM_DEV(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (card->read.ccwdev == cdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		channel = &card->read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		channel = &card->write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	cstat = irb->scsw.cmd.cstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	dstat = irb->scsw.cmd.dstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		      irb->scsw.cmd.dstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		      irb->scsw.cmd.actl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	/* Check for channel and device errors presented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	rc = lcs_get_problem(cdev, irb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
		dev_warn(&cdev->dev,
			 "The LCS device stopped because of an error,"
			 " dstat=0x%X, cstat=0x%X\n",
			 dstat, cstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			channel->state = LCS_CH_STATE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (channel->state == LCS_CH_STATE_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		lcs_schedule_recovery(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		wake_up(&card->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	/* How far in the ccw chain have we processed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	if ((channel->state != LCS_CH_STATE_INIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	    (irb->scsw.cmd.cpa != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			- channel->ccws;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		    (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			/* Bloody io subsystem tells us lies about cpa... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			index = (index - 1) & (LCS_NUM_BUFFS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		while (channel->io_idx != index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			__lcs_processed_buffer(channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 					       channel->iob + channel->io_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			channel->io_idx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 				(channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	    (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	    (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		/* Mark channel as stopped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		channel->state = LCS_CH_STATE_STOPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		/* CCW execution stopped on a suspend bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		channel->state = LCS_CH_STATE_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		if (irb->scsw.cmd.cc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			ccw_device_halt(channel->ccwdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		/* The channel has been stopped by halt_IO. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		channel->state = LCS_CH_STATE_HALTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		channel->state = LCS_CH_STATE_CLEARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/* Do the rest in the tasklet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	tasklet_schedule(&channel->irq_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
/**
 * Tasklet scheduled by the IRQ handler: runs the callbacks of processed
 * buffers and restarts or resumes the channel if necessary.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) lcs_tasklet(unsigned long data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	struct lcs_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	struct lcs_buffer *iob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	int buf_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	channel = (struct lcs_channel *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	/* Check for processed buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	iob = channel->iob;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	buf_idx = channel->buf_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		/* Do the callback thing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		if (iob[buf_idx].callback != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			iob[buf_idx].callback(channel, iob + buf_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	channel->buf_idx = buf_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	if (channel->state == LCS_CH_STATE_STOPPED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		lcs_start_channel(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	if (channel->state == LCS_CH_STATE_SUSPENDED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	    channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		__lcs_resume_channel(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	/* Something happened on the channel. Wake up waiters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	wake_up(&channel->wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  * Finish current tx buffer and make it ready for transmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) __lcs_emit_txbuffer(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	LCS_DBF_TEXT(5, trace, "emittx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	*(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	card->tx_buffer->count += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	lcs_ready_buffer(&card->write, card->tx_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	card->tx_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	card->tx_emitted++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)  * Callback for finished tx buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	LCS_DBF_TEXT(5, trace, "txbuffcb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	/* Put buffer back to pool. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	lcs_release_buffer(channel, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	card = container_of(channel, struct lcs_card, write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		netif_wake_queue(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	spin_lock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	card->tx_emitted--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		 * Last running tx buffer has finished. Submit partially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		 * filled current buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		__lcs_emit_txbuffer(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	spin_unlock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
/**
 * Core transmit path: copy the skb into the current tx buffer.  Frames are
 * packed back to back; the buffer is handed to the write channel when the
 * next frame would not fit or when no other emitted buffer is outstanding.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		 struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	struct lcs_header *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	int rc = NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	LCS_DBF_TEXT(5, trace, "hardxmit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		card->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		card->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	if (card->state != DEV_STATE_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		card->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		card->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		card->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (skb->protocol == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	netif_stop_queue(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	spin_lock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	if (card->tx_buffer != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	    card->tx_buffer->count + sizeof(struct lcs_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	    skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		/* skb too big for current tx buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		__lcs_emit_txbuffer(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	if (card->tx_buffer == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		/* Get new tx buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		card->tx_buffer = lcs_get_buffer(&card->write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		if (card->tx_buffer == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			card->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			rc = NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		card->tx_buffer->callback = lcs_txbuffer_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		card->tx_buffer->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	}
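	/*
	 * Framing note (derived from the code below): outgoing frames are
	 * packed back to back into the I/O buffer, and each lcs_header
	 * carries in ->offset the cumulative offset of the *next* frame.
	 * The receive path uses this chain to walk a buffer until it
	 * finds a zero offset.
	 */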
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	header = (struct lcs_header *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		(card->tx_buffer->data + card->tx_buffer->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	header->offset = card->tx_buffer->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	header->type = card->lan_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	header->slot = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	skb_copy_from_linear_data(skb, header + 1, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	spin_unlock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	card->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	card->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	netif_wake_queue(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	spin_lock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		/* If this is the first tx buffer emit it immediately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		__lcs_emit_txbuffer(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	spin_unlock(&card->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	LCS_DBF_TEXT(5, trace, "pktxmit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	card = (struct lcs_card *) dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	rc = __lcs_start_xmit(card, skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * Send the startlan and lanstat commands to make the LCS device ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) lcs_startlan_auto(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	LCS_DBF_TEXT(2, trace, "strtauto");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) #ifdef CONFIG_ETHERNET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	card->lan_type = LCS_FRAME_TYPE_ENET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) #ifdef CONFIG_FDDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	card->lan_type = LCS_FRAME_TYPE_FDDI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
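/*
 * Start the LAN on the configured port. If no port number has been set
 * (LCS_INVALID_PORT_NO), ports 0..16 are probed in turn and card->portno
 * is left at the first port that answered the startlan command.
 */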
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) lcs_startlan(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	LCS_DBF_TEXT(2, trace, "startlan");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	if (card->portno != LCS_INVALID_PORT_NO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		if (card->lan_type == LCS_FRAME_TYPE_AUTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 			rc = lcs_startlan_auto(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)                 for (i = 0; i <= 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)                         card->portno = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)                         if (card->lan_type != LCS_FRAME_TYPE_AUTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)                                 rc = lcs_send_startlan(card,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)                                                        LCS_INITIATOR_TCPIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)                         else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)                                 /* autodetecting lan type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)                                 rc = lcs_startlan_auto(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)                         if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)                                 break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)                 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)         }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		return lcs_send_lanstat(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)  * LCS detect function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)  * Set up the channels and make them I/O ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) lcs_detect(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	LCS_DBF_TEXT(2, setup, "lcsdetct");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	/* start/reset card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (card->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		netif_stop_queue(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	rc = lcs_stop_channels(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		rc = lcs_start_channels(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 				rc = lcs_startlan(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		card->state = DEV_STATE_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		card->state = DEV_STATE_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		card->write.state = LCS_CH_STATE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		card->read.state =  LCS_CH_STATE_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  * LCS Stop card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) lcs_stopcard(struct lcs_card *card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	LCS_DBF_TEXT(3, setup, "stopcard");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	if (card->read.state != LCS_CH_STATE_STOPPED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	    card->write.state != LCS_CH_STATE_STOPPED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	    card->read.state != LCS_CH_STATE_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	    card->write.state != LCS_CH_STATE_ERROR &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	    card->state == DEV_STATE_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		lcs_clear_multicast_list(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		rc = lcs_send_stoplan(card, LCS_INITIATOR_TCPIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		rc = lcs_send_shutdown(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	rc = lcs_stop_channels(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	card->state = DEV_STATE_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)  * Kernel thread helper function for LGW-initiated commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) lcs_start_kernel_thread(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	LCS_DBF_TEXT(5, trace, "krnthrd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		kthread_run(lcs_recovery, card, "lcs_recover");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) #ifdef CONFIG_IP_MULTICAST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		kthread_run(lcs_register_mc_addresses, card, "regipm");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) }
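/*
 * Note: lcs_do_start_thread() is assumed to test the corresponding bit in
 * card->thread_start_mask; callers such as lcs_schedule_recovery() are
 * expected to set that bit and schedule card->kernel_thread_starter, so
 * the kthreads are always created from process context here.
 */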
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)  * Process control frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	LCS_DBF_TEXT(5, trace, "getctrl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (cmd->initiator == LCS_INITIATOR_LGW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		switch(cmd->cmd_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		case LCS_CMD_STARTUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		case LCS_CMD_STARTLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 			lcs_schedule_recovery(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		case LCS_CMD_STOPLAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			if (card->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 				pr_warn("Stoplan for %s initiated by LGW\n", card->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 				netif_carrier_off(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			LCS_DBF_TEXT(5, trace, "noLGWcmd");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		lcs_notify_lancmd_waiters(card, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)  * Unpack network packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	LCS_DBF_TEXT(5, trace, "getskb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (card->dev == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	    card->state != DEV_STATE_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		/* The card isn't up. Ignore the packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	skb = dev_alloc_skb(skb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		dev_err(&card->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			"Allocating a socket buffer to interface %s failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			card->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		card->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	skb_put_data(skb, skb_data, skb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	skb->protocol =	card->lan_type_trans(skb, card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	card->stats.rx_bytes += skb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	card->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (skb->protocol == htons(ETH_P_802_2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		*((__u32 *)skb->cb) = ++card->pkt_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * LCS main routine to get packets and lancmd replies from the buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	struct lcs_header *lcs_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	__u16 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	LCS_DBF_TEXT(5, trace, "lcsgtpkt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	lcs_hdr = (struct lcs_header *) buffer->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		LCS_DBF_TEXT(4, trace, "-eiogpkt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	card = container_of(channel, struct lcs_card, read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	offset = 0;
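	/*
	 * Walk the chain of frames in this buffer: each header's offset
	 * field gives the position of the next header and a zero offset
	 * ends the chain. Processed headers are overwritten with
	 * LCS_ILLEGAL_OFFSET, which the check above treats as "already
	 * handled".
	 */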
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	while (lcs_hdr->offset != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		if (lcs_hdr->offset <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		    lcs_hdr->offset > LCS_IOBUFFERSIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		    lcs_hdr->offset < offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			/* Offset invalid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			card->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			card->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		/* What kind of frame is it? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			/* Control frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 			 lcs_hdr->type == LCS_FRAME_TYPE_TR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			 lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			/* Normal network packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			lcs_get_skb(card, (char *)(lcs_hdr + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 				    lcs_hdr->offset - offset -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 				    sizeof(struct lcs_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 			/* Unknown frame type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			; // FIXME: error message ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		/* Proceed to next frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		offset = lcs_hdr->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		lcs_hdr = (struct lcs_header *) (buffer->data + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	/* The buffer is now empty. Make it ready again. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	lcs_ready_buffer(&card->read, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  * get network statistics for ifconfig and other user programs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) static struct net_device_stats *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) lcs_getstats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	LCS_DBF_TEXT(4, trace, "netstats");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	card = (struct lcs_card *) dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	return &card->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  * Stop the LCS device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * Called when the interface is taken down, e.g. via "ifconfig xxx down".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) lcs_stop_device(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	LCS_DBF_TEXT(2, trace, "stopdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	card   = (struct lcs_card *) dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	dev->flags &= ~IFF_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	wait_event(card->write.wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		(card->write.state != LCS_CH_STATE_RUNNING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	rc = lcs_stopcard(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		dev_err(&card->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			"Shutting down the LCS device failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  * Start the LCS device and make it runnable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * Called when the interface is brought up, e.g. via "ifconfig xxx up".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) lcs_open_device(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	LCS_DBF_TEXT(2, trace, "opendev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	card = (struct lcs_card *) dev->ml_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	/* start the card and bring up the LAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	rc = lcs_detect(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		pr_err("Error in opening device!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		dev->flags |= IFF_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		card->state = DEV_STATE_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)  * Show function for the portno sysfs attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)         struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	card = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)         if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)                 return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)         return sprintf(buf, "%d\n", card->portno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)  * Store function for the portno sysfs attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)         struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	s16 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	card = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)         if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)                 return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	rc = kstrtos16(buf, 0, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)         /* TODO: sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)         card->portno = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	if (card->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		card->dev->dev_port = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)         return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) static const char *lcs_type[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	"not a channel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	"2216 parallel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	"2216 channel",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	"OSA LCS card",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	"unknown channel type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	"unsupported channel type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	struct ccwgroup_device *cgdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	cgdev = to_ccwgroupdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (!cgdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	card = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)         struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	unsigned int value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	card = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)         if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)                 return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	rc = kstrtouint(buf, 0, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)         /* TODO: sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)         card->lancmd_timeout = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)         return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		      const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	struct lcs_card *card = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	char *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	if (card->state != DEV_STATE_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	i = simple_strtoul(buf, &tmp, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (i == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		lcs_schedule_recovery(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
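/*
 * Usage note: while the device is up, writing 1 to the "recover"
 * attribute schedules the recovery thread via lcs_schedule_recovery()
 * (see lcs_dev_recover_store() above).
 */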
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) static struct attribute * lcs_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	&dev_attr_portno.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	&dev_attr_type.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	&dev_attr_lancmd_timeout.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	&dev_attr_recover.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static struct attribute_group lcs_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	.attrs = lcs_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) static const struct attribute_group *lcs_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	&lcs_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) static const struct device_type lcs_devtype = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	.name = "lcs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	.groups = lcs_attr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) };
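/*
 * The attributes above (portno, type, lancmd_timeout, recover) are bound
 * to the ccwgroup device through lcs_devtype, so they are expected to
 * appear under the group device's sysfs directory, e.g.
 * /sys/bus/ccwgroup/devices/<bus-id>/.
 */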
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * lcs_probe_device is called on establishing a new ccwgroup_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) lcs_probe_device(struct ccwgroup_device *ccwgdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	if (!get_device(&ccwgdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	LCS_DBF_TEXT(2, setup, "add_dev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)         card = lcs_alloc_card();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)         if (!card) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		LCS_DBF_TEXT_(2, setup, "  rc%d", -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		put_device(&ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)                 return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)         }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	dev_set_drvdata(&ccwgdev->dev, card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	ccwgdev->cdev[0]->handler = lcs_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	ccwgdev->cdev[1]->handler = lcs_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	card->gdev = ccwgdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	card->thread_start_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	card->thread_allowed_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	card->thread_running_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	ccwgdev->dev.type = &lcs_devtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) lcs_register_netdev(struct ccwgroup_device *ccwgdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	LCS_DBF_TEXT(2, setup, "regnetdv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	card = dev_get_drvdata(&ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	if (card->dev->reg_state != NETREG_UNINITIALIZED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	return register_netdev(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * lcs_new_device will be called when the group device is set online.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) static const struct net_device_ops lcs_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	.ndo_open		= lcs_open_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	.ndo_stop		= lcs_stop_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	.ndo_get_stats		= lcs_getstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	.ndo_start_xmit		= lcs_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static const struct net_device_ops lcs_mc_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	.ndo_open		= lcs_open_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	.ndo_stop		= lcs_stop_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	.ndo_get_stats		= lcs_getstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	.ndo_start_xmit		= lcs_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	.ndo_set_rx_mode	= lcs_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) };
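/*
 * Two ndo variants: the _mc_ version additionally wires up
 * .ndo_set_rx_mode and is selected in lcs_new_device() when
 * lcs_check_multicast_support() indicates multicast support (returns 0).
 */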
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) lcs_new_device(struct ccwgroup_device *ccwgdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	struct net_device *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	enum lcs_dev_states recover_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	card = dev_get_drvdata(&ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	LCS_DBF_TEXT(2, setup, "newdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	card->read.ccwdev  = ccwgdev->cdev[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	card->write.ccwdev = ccwgdev->cdev[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	recover_state = card->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	rc = ccw_device_set_online(card->read.ccwdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	rc = ccw_device_set_online(card->write.ccwdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		goto out_werr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	LCS_DBF_TEXT(3, setup, "lcsnewdv");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	lcs_setup_card(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	rc = lcs_detect(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		LCS_DBF_TEXT(2, setup, "dtctfail");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		dev_err(&ccwgdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 			"Detecting a network adapter for LCS devices"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			" failed with rc=%d (0x%x)\n", rc, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		lcs_stopcard(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (card->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		LCS_DBF_TEXT(2, setup, "samedev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		LCS_DBF_HEX(3, setup, &card, sizeof(void*));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		goto netdev_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	switch (card->lan_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) #ifdef CONFIG_ETHERNET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	case LCS_FRAME_TYPE_ENET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		card->lan_type_trans = eth_type_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		dev = alloc_etherdev(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) #ifdef CONFIG_FDDI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	case LCS_FRAME_TYPE_FDDI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		card->lan_type_trans = fddi_type_trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		dev = alloc_fddidev(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		LCS_DBF_TEXT(3, setup, "errinit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		pr_err("Initialization failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	card->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	card->dev->ml_priv = card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	card->dev->netdev_ops = &lcs_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	card->dev->dev_port = card->portno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) #ifdef CONFIG_IP_MULTICAST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (!lcs_check_multicast_support(card))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		card->dev->netdev_ops = &lcs_mc_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) netdev_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	lcs_set_allowed_threads(card, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (recover_state == DEV_STATE_RECOVER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		lcs_set_multicast_list(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		card->dev->flags |= IFF_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		netif_carrier_on(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		netif_wake_queue(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		card->state = DEV_STATE_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		lcs_stopcard(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	if (lcs_register_netdev(ccwgdev) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	/* Print out supported assists: IPv6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		(card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		"with" : "without");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	/* Print out supported assist: Multicast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	pr_info("LCS device %s %s Multicast support\n", card->dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		(card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		"with" : "without");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	ccw_device_set_offline(card->write.ccwdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) out_werr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	ccw_device_set_offline(card->read.ccwdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  * lcs_shutdown_device, called when setting the group device offline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	enum lcs_dev_states recover_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	int ret = 0, ret2 = 0, ret3 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	LCS_DBF_TEXT(3, setup, "shtdndev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	card = dev_get_drvdata(&ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	if (recovery_mode == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		lcs_set_allowed_threads(card, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	recover_state = card->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	ret = lcs_stop_device(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	ret2 = ccw_device_set_offline(card->read.ccwdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	ret3 = ccw_device_set_offline(card->write.ccwdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		ret = (ret2) ? ret2 : ret3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (recover_state == DEV_STATE_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		card->state = DEV_STATE_RECOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
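/*
 * recovery_mode != 0 is used by the recovery thread below: in that case
 * threads are not disallowed and the multicast thread is not waited for,
 * presumably because the device is about to be restarted right away by
 * lcs_recovery().
 */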
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	return __lcs_shutdown_device(ccwgdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)  * Drive LCS recovery after a startup or startlan initiated by the LAN Gateway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) lcs_recovery(void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	struct ccwgroup_device *gdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)         int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	card = (struct lcs_card *) ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	LCS_DBF_TEXT(4, trace, "recover1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	LCS_DBF_TEXT(4, trace, "recover2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	gdev = card->gdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	dev_warn(&gdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		"A recovery process has been started for the LCS device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	rc = __lcs_shutdown_device(gdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	rc = lcs_new_device(gdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		pr_info("Device %s successfully recovered!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			card->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		pr_info("Device %s could not be recovered!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			card->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)  * lcs_remove_device, free buffers and card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) lcs_remove_device(struct ccwgroup_device *ccwgdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	struct lcs_card *card;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	card = dev_get_drvdata(&ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	if (!card)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	LCS_DBF_TEXT(3, setup, "remdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	LCS_DBF_HEX(3, setup, &card, sizeof(void*));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	if (ccwgdev->state == CCWGROUP_ONLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		lcs_shutdown_device(ccwgdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	if (card->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		unregister_netdev(card->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	lcs_cleanup_card(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	lcs_free_card(card);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	dev_set_drvdata(&ccwgdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	put_device(&ccwgdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) static struct ccw_device_id lcs_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	{CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	{CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	{CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) MODULE_DEVICE_TABLE(ccw, lcs_ids);
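/*
 * The CCW_DEVICE() entries above match on control unit type 0x3088; the
 * model appears to distinguish the attachment (parallel channel, 2216,
 * OSA-2), and driver_info lets lcs_type_show() map it to the strings in
 * lcs_type[].
 */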
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static struct ccw_driver lcs_ccw_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 		.owner	= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		.name	= "lcs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	.ids	= lcs_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	.probe	= ccwgroup_probe_ccwdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	.remove	= ccwgroup_remove_ccwdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	.int_class = IRQIO_LCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)  * LCS ccwgroup driver registration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) static struct ccwgroup_driver lcs_group_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		.owner	= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		.name	= "lcs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	.ccw_driver  = &lcs_ccw_driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	.setup	     = lcs_probe_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	.remove      = lcs_remove_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	.set_online  = lcs_new_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	.set_offline = lcs_shutdown_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) static ssize_t group_store(struct device_driver *ddrv, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 			   size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	return err ? err : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) static DRIVER_ATTR_WO(group);
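/*
 * Writing the bus IDs of the read and write subchannels (in that order)
 * to this driver attribute creates a new LCS group device, e.g. with
 * illustrative bus IDs:
 *	echo 0.0.d000,0.0.d001 > /sys/bus/ccwgroup/drivers/lcs/group
 */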
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) static struct attribute *lcs_drv_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	&driver_attr_group.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static struct attribute_group lcs_drv_attr_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	.attrs = lcs_drv_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) static const struct attribute_group *lcs_drv_attr_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	&lcs_drv_attr_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)  *  LCS Module/Kernel initialization function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) __init lcs_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	pr_info("Loading %s\n", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	rc = lcs_register_debug_facility();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	LCS_DBF_TEXT(0, setup, "lcsinit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	lcs_root_dev = root_device_register("lcs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	rc = PTR_ERR_OR_ZERO(lcs_root_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		goto register_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	rc = ccw_driver_register(&lcs_ccw_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		goto ccw_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	lcs_group_driver.driver.groups = lcs_drv_attr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	rc = ccwgroup_driver_register(&lcs_group_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		goto ccwgroup_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) ccwgroup_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	ccw_driver_unregister(&lcs_ccw_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) ccw_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	root_device_unregister(lcs_root_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) register_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	lcs_unregister_debug_facility();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	pr_err("Initializing the lcs device driver failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
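/*
 * lcs_init_module() registers its facilities in dependency order (debug
 * facility, root device, CCW driver, ccwgroup driver); the error labels
 * unwind them in reverse, and lcs_cleanup_module() below performs the same
 * teardown on a normal module unload.
 */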
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  *  LCS module cleanup function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) __exit lcs_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	pr_info("Terminating lcs module.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	LCS_DBF_TEXT(0, trace, "cleanup");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	ccwgroup_driver_unregister(&lcs_group_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	ccw_driver_unregister(&lcs_ccw_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	root_device_unregister(lcs_root_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	lcs_unregister_debug_facility();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) module_init(lcs_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) module_exit(lcs_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) MODULE_LICENSE("GPL");
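/*
 * Rough bring-up sketch, assuming an LCS-capable read/write channel pair is
 * configured for the guest; the bus IDs, the IP address and the resulting
 * interface name are illustrative only:
 *
 *   modprobe lcs
 *   echo 0.0.d000,0.0.d001 > /sys/bus/ccwgroup/drivers/lcs/group
 *   echo 1 > /sys/bus/ccwgroup/devices/0.0.d000/online
 *   ip addr add 192.0.2.10/24 dev eth0
 *   ip link set eth0 up
 */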
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)