Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * File Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *   skfddi.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright Information:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *   Copyright SysKonnect 1998,1999.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * The information in this file is provided "AS IS" without warranty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * Abstract:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *   A Linux device driver supporting the SysKonnect FDDI PCI controller
 *   family.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * Maintainers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *   CG    Christoph Goos (cgoos@syskonnect.de)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * Contributors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *   DM    David S. Miller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *
 * Address all questions to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  *   linux@syskonnect.de
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * The technical manual for the adapters is available from SysKonnect's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * web pages: www.syskonnect.com
 * Go to "Support" and search the Knowledge Base for "manual".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * Driver Architecture:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *   The driver architecture is based on the DEC FDDI driver by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  *   Lawrence V. Stefani and several ethernet drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *   I also used an existing Windows NT miniport driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  *   All hardware dependent functions are handled by the SysKonnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  *   Hardware Module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34)  *   The only headerfiles that are directly related to this source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35)  *   are skfddi.c, h/types.h, h/osdef1st.h, h/targetos.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36)  *   The others belong to the SysKonnect FDDI Hardware Module and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37)  *   should better not be changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39)  * Modification History:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40)  *              Date            Name    Description
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41)  *              02-Mar-98       CG	Created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  *		10-Mar-99	CG	Support for 2.2.x added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  *		25-Mar-99	CG	Corrected IRQ routing for SMP (APIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *		26-Oct-99	CG	Fixed compilation error on 2.2.13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  *		12-Nov-99	CG	Source code release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  *		22-Nov-99	CG	Included in kernel source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  *		07-May-00	DM	64 bit fixes, new dma interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  *		31-Jul-03	DB	Audit copy_*_user in skfp_ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  *					  Daniele Bellucci <bellucda@tiscali.it>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  *		03-Dec-03	SH	Convert to PCI device model
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  * Compilation options (-Dxxx):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  *              DRIVERDEBUG     print lots of messages to log file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  *              DUMPPACKETS     print received/transmitted packets to logfile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  * 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57)  * Tested cpu architectures:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58)  *	- i386
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59)  *	- sparc64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) /* Version information string - should be updated prior to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) /* each new release!!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #define VERSION		"2.07"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
/* Banner printed to the log once, when the first adapter is probed
 * (see skfp_init_one); never changed at runtime.
 */
static const char * const boot_msg = 
	"SysKonnect FDDI PCI Adapter driver v" VERSION " for\n"
	"  SK-55xx/SK-58xx adapters (SK-NET FDDI-FP/UP/LP)";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) /* Include files */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) #include <linux/capability.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) #include <linux/fddidevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) #include	"h/types.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) #undef ADDR			// undo Linux definition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #include	"h/skfbi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) #include	"h/fddi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) #include	"h/smc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) #include	"h/smtstate.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) // Define module-wide (static) routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) static int skfp_driver_init(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) static int skfp_open(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) static int skfp_close(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) static irqreturn_t skfp_interrupt(int irq, void *dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) static void skfp_ctl_set_multicast_list(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 				       struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) static void send_queued_packets(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) static void ResetAdapter(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) // Functions needed by the hardware module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) void *mac_drv_get_space(struct s_smc *smc, u_int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) void *mac_drv_get_desc_mem(struct s_smc *smc, u_int size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) unsigned long dma_master(struct s_smc *smc, void *virt, int len, int flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 		  int flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) void llc_restart_tx(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 			 int frag_count, int len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) 			 int frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) void mac_drv_fill_rxd(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 		       int frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 		    int la_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) void dump_data(unsigned char *Data, int length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) // External functions from the hardware module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) extern u_int mac_drv_check_space(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) extern int mac_drv_init(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) extern void hwm_tx_frag(struct s_smc *smc, char far * virt, u_long phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 			int len, int frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) extern int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 		       int frame_len, int frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) extern void fddi_isr(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) extern void hwm_rx_frag(struct s_smc *smc, char far * virt, u_long phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 			int len, int frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) extern void mac_drv_clear_rx_queue(struct s_smc *smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) extern void enable_tx_irq(struct s_smc *smc, u_short queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 
/* PCI IDs claimed by this driver: every SysKonnect FDDI (SK-FP) board,
 * any subsystem vendor/device.  Exported via MODULE_DEVICE_TABLE so
 * userspace module loaders can autoload the driver on hotplug.
 */
static const struct pci_device_id skfddi_pci_tbl[] = {
	{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
	{ }			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) // Define module-wide (static) variables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) static int num_boards;	/* total number of adapters configured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
/* net_device callbacks implemented by this driver; installed on each
 * device in skfp_init_one via dev->netdev_ops.
 */
static const struct net_device_ops skfp_netdev_ops = {
	.ndo_open		= skfp_open,
	.ndo_stop		= skfp_close,
	.ndo_start_xmit		= skfp_send_pkt,
	.ndo_get_stats		= skfp_ctl_get_stats,
	.ndo_set_rx_mode	= skfp_ctl_set_multicast_list,
	.ndo_set_mac_address	= skfp_ctl_set_mac_address,
	.ndo_do_ioctl		= skfp_ioctl,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171)  * =================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172)  * = skfp_init_one =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * =================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  *   Probes for supported FDDI PCI controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179)  *   Condition code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182)  *   pdev - pointer to PCI device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185)  *   This is now called by PCI driver registration process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186)  *   for each board found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188)  * Return Codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189)  *   0           - This device (fddi0, fddi1, etc) configured successfully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190)  *   -ENODEV - No devices present, or no SysKonnect FDDI PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191)  *                         present for this device name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194)  * Side Effects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  *   initialized and the board resources are read and stored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197)  *   the device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) static int skfp_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 				const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	struct s_smc *smc;	/* board pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	void __iomem *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	pr_debug("entering skfp_init_one\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	if (num_boards == 0) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 		printk("%s\n", boot_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	err = pci_request_regions(pdev, "skfddi");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 		goto err_out1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) #ifdef MEM_MAPPED_IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		printk(KERN_ERR "skfp: region is not an MMIO resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		goto err_out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	mem = ioremap(pci_resource_start(pdev, 0), 0x4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	if (!(pci_resource_flags(pdev, 1) & IO_RESOURCE_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 		printk(KERN_ERR "skfp: region is not PIO resource\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 		goto err_out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	mem = ioport_map(pci_resource_start(pdev, 1), FP_IO_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		printk(KERN_ERR "skfp:  Unable to map register, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 				"FDDI adapter will be disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		goto err_out2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	dev = alloc_fddidev(sizeof(struct s_smc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 		printk(KERN_ERR "skfp: Unable to allocate fddi device, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 				"FDDI adapter will be disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 		goto err_out3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	dev->netdev_ops = &skfp_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	/* Initialize board structure with bus-specific info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	smc->os.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	smc->os.bus_type = SK_BUS_TYPE_PCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	smc->os.pdev = *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	smc->os.QueueSkb = MAX_TX_QUEUE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	smc->os.MaxFrameSize = MAX_FRAME_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	smc->os.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	smc->hw.slot = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	smc->hw.iop = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	smc->os.ResetRequested = FALSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	skb_queue_head_init(&smc->os.SendSkbQueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	dev->base_addr = (unsigned long)mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	err = skfp_driver_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		goto err_out4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 		goto err_out5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	++num_boards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	if ((pdev->subsystem_device & 0xff00) == 0x5500 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	    (pdev->subsystem_device & 0xff00) == 0x5800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		printk("%s: SysKonnect FDDI PCI adapter"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 		       " found (SK-%04X)\n", dev->name,	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 		       pdev->subsystem_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 		printk("%s: FDDI PCI adapter found\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) err_out5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	if (smc->os.SharedMemAddr) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		dma_free_coherent(&pdev->dev, smc->os.SharedMemSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 				  smc->os.SharedMemAddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 				  smc->os.SharedMemDMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	dma_free_coherent(&pdev->dev, MAX_FRAME_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 			  smc->os.LocalRxBuffer, smc->os.LocalRxBufferDMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) err_out4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) err_out3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) #ifdef MEM_MAPPED_IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	iounmap(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	ioport_unmap(mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) err_out2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) err_out1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
/*
 * Called for each adapter board from pci_unregister_driver
 *
 * Tears down everything skfp_init_one set up, in reverse order:
 * unregister the netdev first (so no new I/O can arrive), then free
 * the DMA buffers, unmap the registers, release the PCI regions,
 * free the net_device and finally disable the PCI device.
 */
static void skfp_remove_one(struct pci_dev *pdev)
{
	struct net_device *p = pci_get_drvdata(pdev);
	struct s_smc *lp = netdev_priv(p);

	unregister_netdev(p);

	if (lp->os.SharedMemAddr) {
		dma_free_coherent(&pdev->dev,
				  lp->os.SharedMemSize,
				  lp->os.SharedMemAddr,
				  lp->os.SharedMemDMA);
		/* Clear the pointer so a second teardown cannot double-free. */
		lp->os.SharedMemAddr = NULL;
	}
	if (lp->os.LocalRxBuffer) {
		dma_free_coherent(&pdev->dev,
				  MAX_FRAME_SIZE,
				  lp->os.LocalRxBuffer,
				  lp->os.LocalRxBufferDMA);
		lp->os.LocalRxBuffer = NULL;
	}
	/* Undo whichever register mapping skfp_init_one created. */
#ifdef MEM_MAPPED_IO
	iounmap(lp->hw.iop);
#else
	ioport_unmap(lp->hw.iop);
#endif
	pci_release_regions(pdev);
	free_netdev(p);

	pci_disable_device(pdev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352)  * ====================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353)  * = skfp_driver_init =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354)  * ====================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  *   Initializes remaining adapter board structure information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358)  *   and makes sure adapter is in a safe state prior to skfp_open().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361)  *   Condition code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  *   dev - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367)  *   This function allocates additional resources such as the host memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368)  *   blocks needed by the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369)  *   The adapter is also reset. The OS must call skfp_open() to open 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370)  *   the adapter and bring it on-line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372)  * Return Codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373)  *    0 - initialization succeeded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374)  *   -1 - initialization failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) static  int skfp_driver_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	int err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 	pr_debug("entering skfp_driver_init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	// set the io address in private structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 	bp->base_addr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	// Get the interrupt level from the PCI Configuration Table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 	smc->hw.irq = dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 	spin_lock_init(&bp->DriverLock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	// Allocate invalid frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	bp->LocalRxBuffer = dma_alloc_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 					       &bp->LocalRxBufferDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 					       GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	if (!bp->LocalRxBuffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		printk("could not allocate mem for ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		printk("LocalRxBuffer: %d byte\n", MAX_FRAME_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	// Determine the required size of the 'shared' memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 	bp->SharedMemSize = mac_drv_check_space();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 	pr_debug("Memory for HWM: %ld\n", bp->SharedMemSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	if (bp->SharedMemSize > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 		bp->SharedMemSize += 16;	// for descriptor alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 						       bp->SharedMemSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 						       &bp->SharedMemDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 						       GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		if (!bp->SharedMemAddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 			printk("could not allocate mem for ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 			printk("hardware module: %ld byte\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 			       bp->SharedMemSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		bp->SharedMemAddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	bp->SharedMemHeap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 	card_stop(smc);		// Reset adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	pr_debug("mac_drv_init()..\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	if (mac_drv_init(smc) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 		pr_debug("mac_drv_init() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	read_address(smc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	smt_reset_defaults(smc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	if (bp->SharedMemAddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		dma_free_coherent(&bp->pdev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 				  bp->SharedMemSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 				  bp->SharedMemAddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 				  bp->SharedMemDMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		bp->SharedMemAddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	if (bp->LocalRxBuffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		dma_free_coherent(&bp->pdev.dev, MAX_FRAME_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 				  bp->LocalRxBuffer, bp->LocalRxBufferDMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		bp->LocalRxBuffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) }				// skfp_driver_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458)  * =============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459)  * = skfp_open =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460)  * =============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463)  *   Opens the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466)  *   Condition code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469)  *   dev - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472)  *   This function brings the adapter to an operational state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474)  * Return Codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475)  *   0           - Adapter was successfully opened
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476)  *   -EAGAIN - Could not register IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) static int skfp_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	pr_debug("entering skfp_open\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	/* Register IRQ - support shared interrupts by passing device ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 			  dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	 * Set current address to factory MAC address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	 * Note: We've already done this step in skfp_driver_init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	 *       However, it's possible that a user has set a node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	 *               address override, then closed and reopened the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	 *               adapter.  Unless we reset the device address field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	 *               now, we'll continue to use the existing modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	 *               address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	read_address(smc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	init_smt(smc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	smt_online(smc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	STI_FBI();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	/* Clear local multicast address tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	mac_clear_multicast(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	/* Disable promiscuous filter settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) }				// skfp_open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519)  * ==============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520)  * = skfp_close =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521)  * ==============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524)  *   Closes the device/module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527)  *   Condition code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530)  *   dev - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533)  *   This routine closes the adapter and brings it to a safe state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534)  *   The interrupt service routine is deregistered with the OS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535)  *   The adapter can be opened again with another call to skfp_open().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537)  * Return Codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538)  *   Always return 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540)  * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541)  *   No further requests for this adapter are made after this routine is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542)  *   called.  skfp_open() can be called to reset and reinitialize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543)  *   adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) static int skfp_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	CLI_FBI();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	smt_reset_defaults(smc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	card_stop(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	mac_drv_clear_tx_queue(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	mac_drv_clear_rx_queue(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	/* Deregister (free) IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	skb_queue_purge(&bp->SendSkbQueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	bp->QueueSkb = MAX_TX_QUEUE_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) }				// skfp_close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * ==================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  * = skfp_interrupt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570)  * ==================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573)  *   Interrupt processing routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  *   None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579)  *   irq        - interrupt vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580)  *   dev_id     - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583)  *   This routine calls the interrupt processing routine for this adapter.  It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584)  *   disables and reenables adapter interrupts, as appropriate.  We can support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585)  *   shared interrupts since the incoming dev_id pointer provides our device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586)  *   structure context. All the real work is done in the hardware module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588)  * Return Codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  *   None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592)  *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593)  *   on Intel-based systems) is done by the operating system outside this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594)  *   routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596)  *       System interrupts are enabled through this call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598)  * Side Effects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599)  *   Interrupts are disabled, then reenabled at the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) static irqreturn_t skfp_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	struct s_smc *smc;	/* private board structure pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	skfddi_priv *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	// IRQs enabled or disabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	if (inpd(ADDR(B0_IMSK)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		// IRQs are disabled: must be shared interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	// Note: At this point, IRQs are enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	if ((inpd(ISR_A) & smc->hw.is_imask) == 0) {	// IRQ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		// Adapter did not issue an IRQ: must be shared interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	CLI_FBI();		// Disable IRQs from our adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	spin_lock(&bp->DriverLock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	// Call interrupt handler in hardware module (HWM).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	fddi_isr(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	if (smc->os.ResetRequested) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		ResetAdapter(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		smc->os.ResetRequested = FALSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	spin_unlock(&bp->DriverLock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	STI_FBI();		// Enable IRQs from our adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) }				// skfp_interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * ======================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * = skfp_ctl_get_stats =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * ======================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  *   Get statistics for FDDI adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647)  *   Pointer to FDDI statistics structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650)  *   dev - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653)  *   Gets current MIB objects from adapter, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654)  *   returns FDDI statistics structure as defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  *   in if_fddi.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657)  *   Note: Since the FDDI statistics structure is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658)  *   still new and the device structure doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  *   have an FDDI-specific get statistics handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  *   we'll return the FDDI statistics structure as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  *   a pointer to an Ethernet statistics structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662)  *   That way, at least the first part of the statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663)  *   structure can be decoded properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664)  *   We'll have to pay attention to this routine as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  *   device structure becomes more mature and LAN media
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  *   independent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	struct s_smc *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	/* Fill the bp->stats structure with driver-maintained counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	bp->os.MacStat.port_bs_flag[0] = 0x1234;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	bp->os.MacStat.port_bs_flag[1] = 0x5678;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) // goos: need to fill out fddi statistic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	/* Get FDDI SMT MIB objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) /* Fill the bp->stats structure with the SMT MIB object values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	memcpy(&bp->stats.port_requested_paths[0 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	memcpy(&bp->stats.port_requested_paths[1 * 3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	/* Fill the bp->stats structure with the FDDI counter values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	return (struct net_device_stats *)&bp->os.MacStat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) }				// ctl_get_stat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  * ==============================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * = skfp_ctl_set_multicast_list =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  * ==============================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  *   Enable/Disable LLC frame promiscuous mode reception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  *   on the adapter and/or update multicast address table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  *   None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  *   dev - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  *   This function acquires the driver lock and only calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  *   skfp_ctl_set_multicast_list_wo_lock then.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  *   This routine follows a fairly simple algorithm for setting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  *   adapter filters and CAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  *      if IFF_PROMISC flag is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *              enable promiscuous mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  *      else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  *              disable promiscuous mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  *              if number of multicast addresses <= max. multicast number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  *                      add mc addresses to adapter table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  *              else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  *                      enable promiscuous mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  *              update adapter filters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  *   Multicast addresses are presented in canonical (LSB) format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  * Side Effects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831)  *   On-board adapter filters are updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static void skfp_ctl_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	unsigned long Flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	spin_lock_irqsave(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	skfp_ctl_set_multicast_list_wo_lock(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	spin_unlock_irqrestore(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) }				// skfp_ctl_set_multicast_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* Enable promiscuous mode, if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		mac_drv_rx_mode(smc, RX_ENABLE_PROMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		pr_debug("PROMISCUOUS MODE ENABLED\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/* Else, update multicast address table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		pr_debug("PROMISCUOUS MODE DISABLED\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		// Reset all MC addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		mac_clear_multicast(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		mac_drv_rx_mode(smc, RX_DISABLE_ALLMULTI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			pr_debug("ENABLE ALL MC ADDRESSES\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		} else if (!netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				/* use exact filtering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				// point to first multicast addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 					mac_add_multicast(smc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 						(struct fddi_addr *)ha->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 						1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 					pr_debug("ENABLE MC ADDRESS: %pMF\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 						 ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			} else {	// more MC addresses than HW supports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 				mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 				pr_debug("ENABLE ALL MC ADDRESSES\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		} else {	// no MC addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			pr_debug("DISABLE ALL MC ADDRESSES\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		/* Update adapter filters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		mac_update_multicast(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }				// skfp_ctl_set_multicast_list_wo_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * ===========================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * = skfp_ctl_set_mac_address =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * ===========================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  *   set new mac address on adapter and update dev_addr field in device table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  *   None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  *   dev  - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  *   addr - pointer to sockaddr structure containing unicast address to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  *   The address pointed to by addr->sa_data is a valid unicast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  *   address and is presented in canonical (LSB) format.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	struct sockaddr *p_sockaddr = (struct sockaddr *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	unsigned long Flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	spin_lock_irqsave(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	ResetAdapter(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	spin_unlock_irqrestore(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	return 0;		/* always return zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) }				// skfp_ctl_set_mac_address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * ==============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * = skfp_ioctl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * ==============
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  * Perform IOCTL call functions here. Some are privileged operations and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942)  * effective uid is checked in those cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  *   status value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  *   0 - success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  *   other - failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  *   dev  - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  *   rq - pointer to ioctl request structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  *   cmd - ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	skfddi_priv *lp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	struct s_skfp_ioctl ioc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	if (copy_from_user(&ioc, rq->ifr_data, sizeof(struct s_skfp_ioctl)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	switch (ioc.cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	case SKFP_GET_STATS:	/* Get the driver statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		ioc.len = sizeof(lp->MacStat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		status = copy_to_user(ioc.data, skfp_ctl_get_stats(dev), ioc.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 				? -EFAULT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	case SKFP_CLR_STATS:	/* Zero out the driver statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		if (!capable(CAP_NET_ADMIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			status = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			memset(&lp->MacStat, 0, sizeof(lp->MacStat));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		status = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	}			// switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }				// skfp_ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * =====================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * = skfp_send_pkt     =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * =====================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  *   Queues a packet for transmission and try to transmit it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  *   Condition code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  *   skb - pointer to sk_buff to queue for transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  *   dev - pointer to device information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  *   Here we assume that an incoming skb transmit request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  *   is contained in a single physically contiguous buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  *   in which the virtual address of the start of packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  *   (skb->data) can be converted to a physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  *   by using pci_map_single().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  *   We have an internal queue for packets we can not send 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  *   immediately. Packets in this queue can be given to the 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *   adapter if transmit buffers are freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  *   We can't free the skb until after it's been DMA'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  *   out by the adapter, so we'll keep it in the driver and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  *   return it in mac_drv_tx_complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * Return Codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  *   0 - driver has queued and/or sent packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  *       1 - caller should requeue the sk_buff for later transmission
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  *   The entire packet is stored in one physically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  *   contiguous buffer which is not cached and whose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  *   32-bit physical address can be determined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  *   It's vital that this routine is NOT reentered for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  *   same board and that the OS is not in another section of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *   code (eg. skfp_interrupt) for the same board on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  *   different thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)  * Side Effects:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)  *   None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 				       struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	struct s_smc *smc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	pr_debug("skfp_send_pkt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 * Verify that incoming transmit request is OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	 * Note: The packet size check is consistent with other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	 *               Linux device drivers, although the correct packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	 *               size should be verified before calling the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	 *               transmit routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (!(skb->len >= FDDI_K_LLC_ZLEN && skb->len <= FDDI_K_LLC_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		bp->MacStat.gen.tx_errors++;	/* bump error counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		// dequeue packets from xmt queue and send them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		return NETDEV_TX_OK;	/* return "success" */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (bp->QueueSkb == 0) {	// return with tbusy set: queue full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	bp->QueueSkb--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	skb_queue_tail(&bp->SendSkbQueue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	send_queued_packets(netdev_priv(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (bp->QueueSkb == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }				// skfp_send_pkt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  * =======================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  * = send_queued_packets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  * =======================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  *   
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  * Overview:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  *   Send packets from the driver queue as long as there are some and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  *   transmit resources are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  *  
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  *   None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)  *       
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  *   smc - pointer to smc (adapter) structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * Functional Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  *   Take a packet from queue if there is any. If not, then we are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  *   Check if there are resources to send the packet. If not, requeue it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  *   and exit. 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  *   Set packet descriptor flags and give packet to adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  *   Check if any send resources can be freed (we do not use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  *   transmit complete interrupt).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static void send_queued_packets(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	unsigned char fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	int queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	struct s_smt_fp_txd *txd;	// Current TxD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	dma_addr_t dma_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	unsigned long Flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	int frame_status;	// HWM tx frame status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	pr_debug("send queued packets\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		// send first buffer from queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		skb = skb_dequeue(&bp->SendSkbQueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			pr_debug("queue empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		}		// queue empty !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		spin_lock_irqsave(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		fc = skb->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		queue = (fc & FC_SYNC_BIT) ? QUEUE_S : QUEUE_A0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) #ifdef ESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		// Check if the frame may/must be sent as a synchronous frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		if ((fc & ~(FC_SYNC_BIT | FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			// It's an LLC frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			if (!smc->ess.sync_bw_available)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 				fc &= ~FC_SYNC_BIT; // No bandwidth available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 			else {	// Bandwidth is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				if (smc->mib.fddiESSSynchTxMode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 					// Send as sync. frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 					fc |= FC_SYNC_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) #endif				// ESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		frame_status = hwm_tx_init(smc, fc, 1, skb->len, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		if ((frame_status & (LOC_TX | LAN_TX)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			// Unable to send the frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			if ((frame_status & RING_DOWN) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 				// Ring is down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 				pr_debug("Tx attempt while ring down.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			} else if ((frame_status & OUT_OF_TXD) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 				pr_debug("%s: out of TXDs.\n", bp->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 				pr_debug("%s: out of transmit resources",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 					bp->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			// Note: We will retry the operation as soon as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			// transmit resources become available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			skb_queue_head(&bp->SendSkbQueue, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			spin_unlock_irqrestore(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			return;	// Packet has been queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		}		// if (unable to send frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		bp->QueueSkb++;	// one packet less in local queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		// source address in packet ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		CheckSourceAddress(skb->data, smc->hw.fddi_canon_addr.a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		txd = (struct s_smt_fp_txd *) HWM_GET_CURR_TXD(smc, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		dma_address = pci_map_single(&bp->pdev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 					     skb->len, PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		if (frame_status & LAN_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			txd->txd_os.skb = skb;			// save skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			txd->txd_os.dma_addr = dma_address;	// save dma mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		hwm_tx_frag(smc, skb->data, dma_address, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)                       frame_status | FIRST_FRAG | LAST_FRAG | EN_IRQ_EOF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		if (!(frame_status & LAN_TX)) {		// local only frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			pci_unmap_single(&bp->pdev, dma_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 					 skb->len, PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		spin_unlock_irqrestore(&bp->DriverLock, Flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	}			// for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	return;			// never reached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }				// send_queued_packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)  * 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)  * CheckSourceAddress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  * Verify if the source address is set. Insert it if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	unsigned char SRBit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if ((((unsigned long) frame[1 + 6]) & ~0x01) != 0) // source routing bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if ((unsigned short) frame[1 + 10] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	SRBit = frame[1 + 6] & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	frame[8] |= SRBit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }				// CheckSourceAddress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  *	ResetAdapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  *	Reset the adapter and bring it back to operational mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static void ResetAdapter(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	pr_debug("[fddi: ResetAdapter]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	// Stop the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	card_stop(smc);		// Stop all activity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	// Clear the transmit and receive descriptor queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	mac_drv_clear_tx_queue(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	mac_drv_clear_rx_queue(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	// Restart the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	smt_reset_defaults(smc, 1);	// Initialize the SMT module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	init_smt(smc, (smc->os.dev)->dev_addr);	// Initialize the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	smt_online(smc, 1);	// Insert into the ring again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	STI_FBI();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	// Restore original receive mode (multicasts, promiscuous, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	skfp_ctl_set_multicast_list_wo_lock(smc->os.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }				// ResetAdapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) //--------------- functions called by hardware module ----------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  *	llc_restart_tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  *	The hardware driver calls this routine when the transmit complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  *	interrupt bits (end of frame) for the synchronous or asynchronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  *	queue is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  * NOTE The hardware driver calls this function also if no packets are queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  *	The routine must be able to handle this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) void llc_restart_tx(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	pr_debug("[llc_restart_tx]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	// Try to send queued packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	spin_unlock(&bp->DriverLock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	send_queued_packets(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	spin_lock(&bp->DriverLock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	netif_start_queue(bp->dev);// system may send again if it was blocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }				// llc_restart_tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  *	mac_drv_get_space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  *	The hardware module calls this function to allocate the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  *	for the SMT MBufs if the define MB_OUTSIDE_SMC is specified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  *	size - Size of memory in bytes to allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  *	!= 0	A pointer to the virtual address of the allocated memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  *	== 0	Allocation error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	void *virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	pr_debug("mac_drv_get_space (%d bytes), ", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	virt = (void *) (smc->os.SharedMemAddr + smc->os.SharedMemHeap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		printk("Unexpected SMT memory size requested: %d\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	smc->os.SharedMemHeap += size;	// Move heap pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	pr_debug("mac_drv_get_space end\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	pr_debug("virt addr: %lx\n", (ulong) virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	pr_debug("bus  addr: %lx\n", (ulong)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	       (smc->os.SharedMemDMA +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		((char *) virt - (char *)smc->os.SharedMemAddr)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	return virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }				// mac_drv_get_space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  *	mac_drv_get_desc_mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  *	This function is called by the hardware dependent module.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  *	It allocates the memory for the RxD and TxD descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  *	This memory must be non-cached, non-movable and non-swappable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  *	This memory should start at a physical page boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  *	size - Size of memory in bytes to allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  *	!= 0	A pointer to the virtual address of the allocated memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  *	== 0	Allocation error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	char *virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	pr_debug("mac_drv_get_desc_mem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	// Descriptor memory must be aligned on 16-byte boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	virt = mac_drv_get_space(smc, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	size = (u_int) (16 - (((unsigned long) virt) & 15UL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	size = size % 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	pr_debug("Allocate %u bytes alignment gap ", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	pr_debug("for descriptor memory.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	if (!mac_drv_get_space(smc, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		printk("fddi: Unable to align descriptor memory.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	return virt + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }				// mac_drv_get_desc_mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)  *	mac_drv_virt2phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)  *	Get the physical address of a given virtual address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  *	virt - A (virtual) pointer into our 'shared' memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  *	Physical address of the given virtual address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	return smc->os.SharedMemDMA +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		((char *) virt - (char *)smc->os.SharedMemAddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }				// mac_drv_virt2phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)  *	dma_master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)  *	The HWM calls this function, when the driver leads through a DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)  *	transfer. If the OS-specific module must prepare the system hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)  *	for the DMA transfer, it should do it in this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)  *	The hardware module calls this dma_master if it wants to send an SMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)  *	frame.  This means that the virt address passed in here is part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)  *      the 'shared' memory area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  *	virt - The virtual address of the data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  *	len - The length in bytes of the data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  *	flag - Indicates the transmit direction and the buffer type:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  *		DMA_RD	(0x01)	system RAM ==> adapter buffer memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  *		DMA_WR	(0x02)	adapter buffer memory ==> system RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  *		SMT_BUF (0x80)	SMT buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  *	>> NOTE: SMT_BUF and DMA_RD are always set for PCI. <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  *	Returns the pyhsical address for the DMA transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	return smc->os.SharedMemDMA +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		((char *) virt - (char *)smc->os.SharedMemAddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }				// dma_master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  *	dma_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  *	The hardware module calls this routine when it has completed a DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  *	transfer. If the operating system dependent module has set up the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)  *	channel via dma_master() (e.g. Windows NT or AIX) it should clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  *	the DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  *	descr - A pointer to a TxD or RxD, respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  *	flag - Indicates the DMA transfer direction / SMT buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  *		DMA_RD	(0x01)	system RAM ==> adapter buffer memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  *		DMA_WR	(0x02)	adapter buffer memory ==> system RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  *		SMT_BUF (0x80)	SMT buffer (managed by HWM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	/* For TX buffers, there are two cases.  If it is an SMT transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	 * buffer, there is nothing to do since we use consistent memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	 * for the 'shared' memory area.  The other case is for normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	 * transmit packets given to us by the networking stack, and in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	 * that case we cleanup the PCI DMA mapping in mac_drv_tx_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	 * below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	 * For RX buffers, we have to unmap dynamic PCI DMA mappings here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	 * because the hardware module is about to potentially look at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 * the contents of the buffer.  If we did not call the PCI DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 * unmap first, the hardware module could read inconsistent data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (flag & DMA_WR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		volatile struct s_smt_fp_rxd *r = &descr->r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		/* If SKB is NULL, we used the local buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		if (r->rxd_os.skb && r->rxd_os.dma_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 			int MaxFrameSize = bp->MaxFrameSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			pci_unmap_single(&bp->pdev, r->rxd_os.dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 					 MaxFrameSize, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 			r->rxd_os.dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }				// dma_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  *	mac_drv_tx_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  *	Transmit of a packet is complete. Release the tx staging buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)  *	txd - A pointer to the last TxD which is used by the frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  *	Returns nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	pr_debug("entering mac_drv_tx_complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	// Check if this TxD points to a skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (!(skb = txd->txd_os.skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		pr_debug("TXD with no skb assigned.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	txd->txd_os.skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	// release the DMA mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	pci_unmap_single(&smc->os.pdev, txd->txd_os.dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			 skb->len, PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	txd->txd_os.dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	smc->os.MacStat.gen.tx_packets++;	// Count transmitted packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	smc->os.MacStat.gen.tx_bytes+=skb->len;	// Count bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	// free the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	pr_debug("leaving mac_drv_tx_complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }				// mac_drv_tx_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)  * dump packets to logfile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) #ifdef DUMPPACKETS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) void dump_data(unsigned char *Data, int length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	printk(KERN_INFO "---Packet start---\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, Data, min_t(size_t, length, 64), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	printk(KERN_INFO "------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }				// dump_data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) #define dump_data(data,len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) #endif				// DUMPPACKETS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  *	mac_drv_rx_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  *	The hardware module calls this function if an LLC frame is received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)  *	in a receive buffer. Also the SMT, NSA, and directed beacon frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)  *	from the network will be passed to the LLC layer by this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  *	if passing is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  *	mac_drv_rx_complete forwards the frame to the LLC layer if it should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)  *	be received. It also fills the RxD ring with new receive buffers if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)  *	some can be queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  *	rxd - A pointer to the first RxD which is used by the receive frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  *	frag_count - Count of RxDs used by the received frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  *	len - Frame length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			 int frag_count, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	unsigned char *virt, *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	unsigned short ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	u_int RifLength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	pr_debug("entering mac_drv_rx_complete (len=%d)\n", len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	if (frag_count != 1) {	// This is not allowed to happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		printk("fddi: Multi-fragment receive!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		goto RequeueRxd;	// Re-use the given RXD(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	skb = rxd->rxd_os.skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		pr_debug("No skb in rxd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		smc->os.MacStat.gen.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		goto RequeueRxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	virt = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	// The DMA mapping was released in dma_complete above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	dump_data(skb->data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	 * FDDI Frame format:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	 * +-------+-------+-------+------------+--------+------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	 * | FC[1] | DA[6] | SA[6] | RIF[0..18] | LLC[3] | Data[0..n] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	 * +-------+-------+-------+------------+--------+------------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	 * FC = Frame Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	 * DA = Destination Address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	 * SA = Source Address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	 * RIF = Routing Information Field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	 * LLC = Logical Link Control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	// Remove Routing Information Field (RIF), if present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	if ((virt[1 + 6] & FDDI_RII) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		RifLength = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) // goos: RIF removal has still to be tested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		pr_debug("RIF found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		// Get RIF length from Routing Control (RC) field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		cp = virt + FDDI_MAC_HDR_LEN;	// Point behind MAC header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		ri = ntohs(*((__be16 *) cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		RifLength = ri & FDDI_RCF_LEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			printk("fddi: Invalid RIF.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 			goto RequeueRxd;	// Discard the frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		virt[1 + 6] &= ~FDDI_RII;	// Clear RII bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		// regions overlap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		virt = cp + RifLength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		for (n = FDDI_MAC_HDR_LEN; n; n--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			*--virt = *--cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		// adjust sbd->data pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		skb_pull(skb, RifLength);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		len -= RifLength;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		RifLength = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	// Count statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	smc->os.MacStat.gen.rx_packets++;	// Count indicated receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 						// packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	smc->os.MacStat.gen.rx_bytes+=len;	// Count bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	// virt points to header again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (virt[1] & 0x01) {	// Check group (multicast) bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		smc->os.MacStat.gen.multicast++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	// deliver frame to system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	rxd->rxd_os.skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	skb_trim(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	skb->protocol = fddi_type_trans(skb, bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)       RequeueRxd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	pr_debug("Rx: re-queue RXD.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	mac_drv_requeue_rxd(smc, rxd, frag_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	smc->os.MacStat.gen.rx_errors++;	// Count receive packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 						// not indicated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }				// mac_drv_rx_complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  *	mac_drv_requeue_rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)  *	The hardware module calls this function to request the OS-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)  *	module to queue the receive buffer(s) represented by the pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  *	to the RxD and the frag_count into the receive queue again. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)  *	buffer was filled with an invalid frame or an SMT frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)  *	rxd - A pointer to the first RxD which is used by the receive frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)  *	frag_count - Count of RxDs used by the received frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			 int frag_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	volatile struct s_smt_fp_rxd *next_rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	volatile struct s_smt_fp_rxd *src_rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	int MaxFrameSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	unsigned char *v_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	dma_addr_t b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	if (frag_count != 1)	// This is not allowed to happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		printk("fddi: Multi-fragment requeue!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	MaxFrameSize = smc->os.MaxFrameSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	src_rxd = rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	for (; frag_count > 0; frag_count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		next_rxd = src_rxd->rxd_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		rxd = HWM_GET_CURR_RXD(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		skb = src_rxd->rxd_os.skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		if (skb == NULL) {	// this should not happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			pr_debug("Requeue with no skb in rxd!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 				// we got a skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				rxd->rxd_os.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 				skb_reserve(skb, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				skb_put(skb, MaxFrameSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				v_addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				b_addr = pci_map_single(&smc->os.pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 							v_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 							MaxFrameSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 							PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 				rxd->rxd_os.dma_addr = b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 				// no skb available, use local buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 				pr_debug("Queueing invalid buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 				rxd->rxd_os.skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 				v_addr = smc->os.LocalRxBuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 				b_addr = smc->os.LocalRxBufferDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 			// we use skb from old rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			rxd->rxd_os.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			v_addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			b_addr = pci_map_single(&smc->os.pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 						v_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 						MaxFrameSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 						PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			rxd->rxd_os.dma_addr = b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			    FIRST_FRAG | LAST_FRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		src_rxd = next_rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }				// mac_drv_requeue_rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)  *	mac_drv_fill_rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)  *	The hardware module calls this function at initialization time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  *	to fill the RxD ring with receive buffers. It is also called by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)  *	mac_drv_rx_complete if rx_free is large enough to queue some new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)  *	receive buffers into the RxD ring. mac_drv_fill_rxd queues new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)  *	receive buffers as long as enough RxDs and receive buffers are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  *	available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) void mac_drv_fill_rxd(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	int MaxFrameSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	unsigned char *v_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	unsigned long b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	volatile struct s_smt_fp_rxd *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	pr_debug("entering mac_drv_fill_rxd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	// Walk through the list of free receive buffers, passing receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	// buffers to the HWM as long as RXDs are available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	MaxFrameSize = smc->os.MaxFrameSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	// Check if there is any RXD left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	while (HWM_GET_RX_FREE(smc) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		pr_debug(".\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		rxd = HWM_GET_CURR_RXD(smc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		skb = alloc_skb(MaxFrameSize + 3, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			// we got a skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			skb_reserve(skb, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			skb_put(skb, MaxFrameSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			v_addr = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			b_addr = pci_map_single(&smc->os.pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 						v_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 						MaxFrameSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 						PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			rxd->rxd_os.dma_addr = b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			// no skb available, use local buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			// System has run out of buffer memory, but we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			// keep the receiver running in hope of better times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			// Multiple descriptors may point to this local buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			// so data in it must be considered invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			pr_debug("Queueing invalid buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			v_addr = smc->os.LocalRxBuffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			b_addr = smc->os.LocalRxBufferDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		rxd->rxd_os.skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		// Pass receive buffer to HWM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		hwm_rx_frag(smc, v_addr, b_addr, MaxFrameSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			    FIRST_FRAG | LAST_FRAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	pr_debug("leaving mac_drv_fill_rxd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }				// mac_drv_fill_rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  *	mac_drv_clear_rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  *	The hardware module calls this function to release unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  *	receive buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  *	rxd - A pointer to the first RxD which is used by the receive buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  *	frag_count - Count of RxDs used by the receive buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		       int frag_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	pr_debug("entering mac_drv_clear_rxd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (frag_count != 1)	// This is not allowed to happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		printk("fddi: Multi-fragment clear!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	for (; frag_count > 0; frag_count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		skb = rxd->rxd_os.skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		if (skb != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 			skfddi_priv *bp = &smc->os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 			int MaxFrameSize = bp->MaxFrameSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 			pci_unmap_single(&bp->pdev, rxd->rxd_os.dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 					 MaxFrameSize, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			rxd->rxd_os.skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		rxd = rxd->rxd_next;	// Next RXD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }				// mac_drv_clear_rxd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  *	mac_drv_rx_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  *	The hardware module calls this routine when an SMT or NSA frame of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)  *	local SMT should be delivered to the LLC layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)  *	It is necessary to have this function, because there is no other way to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)  *	copy the contents of SMT MBufs into receive buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)  *	mac_drv_rx_init allocates the required target memory for this frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)  *	and receives the frame fragment by fragment by calling mac_drv_rx_frag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)  *	len - The length (in bytes) of the received frame (FC, DA, SA, Data).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)  *	fc - The Frame Control field of the received frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  *	look_ahead - A pointer to the lookahead data buffer (may be NULL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  *	la_len - The length of the lookahead data stored in the lookahead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  *	buffer (may be zero).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  *	Always returns zero (0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		    char *look_ahead, int la_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	pr_debug("entering mac_drv_rx_init(len=%d)\n", len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	// "Received" a SMT or NSA frame of the local SMT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	if (len != la_len || len < FDDI_MAC_HDR_LEN || !look_ahead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		pr_debug("fddi: Discard invalid local SMT frame\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		pr_debug("  len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		       len, la_len, (unsigned long) look_ahead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	skb = alloc_skb(len + 3, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		pr_debug("fddi: Local SMT: skb memory exhausted.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	skb_reserve(skb, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	skb_copy_to_linear_data(skb, look_ahead, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	// deliver frame to system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	skb->protocol = fddi_type_trans(skb, smc->os.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) }				// mac_drv_rx_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)  *	smt_timer_poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)  *	This routine is called periodically by the SMT module to clean up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)  *	driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)  *	Return any queued frames back to the upper protocol layers if the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)  *	is down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) void smt_timer_poll(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }				// smt_timer_poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  *	ring_status_indication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)  *	This function indicates a change of the ring state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)  *	status - The current ring status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) void ring_status_indication(struct s_smc *smc, u_long status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	pr_debug("ring_status_indication( ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	if (status & RS_RES15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		pr_debug("RS_RES15 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	if (status & RS_HARDERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		pr_debug("RS_HARDERROR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (status & RS_SOFTERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		pr_debug("RS_SOFTERROR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	if (status & RS_BEACON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		pr_debug("RS_BEACON ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (status & RS_PATHTEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		pr_debug("RS_PATHTEST ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (status & RS_SELFTEST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		pr_debug("RS_SELFTEST ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (status & RS_RES9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		pr_debug("RS_RES9 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	if (status & RS_DISCONNECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		pr_debug("RS_DISCONNECT ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	if (status & RS_RES7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		pr_debug("RS_RES7 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (status & RS_DUPADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		pr_debug("RS_DUPADDR ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (status & RS_NORINGOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		pr_debug("RS_NORINGOP ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	if (status & RS_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		pr_debug("RS_VERSION ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	if (status & RS_STUCKBYPASSS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		pr_debug("RS_STUCKBYPASSS ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (status & RS_EVENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		pr_debug("RS_EVENT ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	if (status & RS_RINGOPCHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		pr_debug("RS_RINGOPCHANGE ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	if (status & RS_RES0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		pr_debug("RS_RES0 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	pr_debug("]\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }				// ring_status_indication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)  *	smt_get_time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  *	Gets the current time from the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  *	None.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)  *	The current time in TICKS_PER_SECOND.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)  *	TICKS_PER_SECOND has the unit 'count of timer ticks per second'. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)  *	defined in "targetos.h". The definition of TICKS_PER_SECOND must comply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)  *	to the time returned by smt_get_time().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) unsigned long smt_get_time(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	return jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }				// smt_get_time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  *	smt_stat_counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  *	Status counter update (ring_op, fifo full).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  *	stat -	= 0: A ring operational change occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)  *		= 1: The FORMAC FIFO buffer is full / FIFO overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) void smt_stat_counter(struct s_smc *smc, int stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) //      BOOLEAN RingIsUp ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	pr_debug("smt_stat_counter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	switch (stat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		pr_debug("Ring operational change.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		pr_debug("Receive fifo overflow.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		smc->os.MacStat.gen.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		pr_debug("Unknown status (%d).\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }				// smt_stat_counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  *	cfm_state_change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  *	Sets CFM state in custom statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  *	c_state - Possible values are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  *		EC0_OUT, EC1_IN, EC2_TRACE, EC3_LEAVE, EC4_PATH_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  *		EC5_INSERT, EC6_CHECK, EC7_DEINSERT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) void cfm_state_change(struct s_smc *smc, int c_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) #ifdef DRIVERDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	switch (c_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	case SC0_ISOLATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		s = "SC0_ISOLATED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	case SC1_WRAP_A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		s = "SC1_WRAP_A";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	case SC2_WRAP_B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		s = "SC2_WRAP_B";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	case SC4_THRU_A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		s = "SC4_THRU_A";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	case SC5_THRU_B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		s = "SC5_THRU_B";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	case SC7_WRAP_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		s = "SC7_WRAP_S";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	case SC9_C_WRAP_A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		s = "SC9_C_WRAP_A";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	case SC10_C_WRAP_B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		s = "SC10_C_WRAP_B";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	case SC11_C_WRAP_S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		s = "SC11_C_WRAP_S";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		pr_debug("cfm_state_change: unknown %d\n", c_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	pr_debug("cfm_state_change: %s\n", s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) #endif				// DRIVERDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }				// cfm_state_change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)  *	ecm_state_change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)  *	Sets ECM state in custom statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)  *	e_state - Possible values are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)  *		SC0_ISOLATED, SC1_WRAP_A (5), SC2_WRAP_B (6), SC4_THRU_A (12),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)  *		SC5_THRU_B (7), SC7_WRAP_S (8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) void ecm_state_change(struct s_smc *smc, int e_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) #ifdef DRIVERDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	switch (e_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	case EC0_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		s = "EC0_OUT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	case EC1_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		s = "EC1_IN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	case EC2_TRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		s = "EC2_TRACE";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	case EC3_LEAVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		s = "EC3_LEAVE";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	case EC4_PATH_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		s = "EC4_PATH_TEST";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	case EC5_INSERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		s = "EC5_INSERT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	case EC6_CHECK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		s = "EC6_CHECK";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	case EC7_DEINSERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		s = "EC7_DEINSERT";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		s = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	pr_debug("ecm_state_change: %s\n", s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) #endif				//DRIVERDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }				// ecm_state_change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  *	rmt_state_change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  *	Sets RMT state in custom statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  *	r_state - Possible values are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)  *		RM0_ISOLATED, RM1_NON_OP, RM2_RING_OP, RM3_DETECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)  *		RM4_NON_OP_DUP, RM5_RING_OP_DUP, RM6_DIRECTED, RM7_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) void rmt_state_change(struct s_smc *smc, int r_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) #ifdef DRIVERDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	switch (r_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	case RM0_ISOLATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		s = "RM0_ISOLATED";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	case RM1_NON_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		s = "RM1_NON_OP - not operational";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	case RM2_RING_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		s = "RM2_RING_OP - ring operational";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	case RM3_DETECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		s = "RM3_DETECT - detect dupl addresses";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	case RM4_NON_OP_DUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		s = "RM4_NON_OP_DUP - dupl. addr detected";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	case RM5_RING_OP_DUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		s = "RM5_RING_OP_DUP - ring oper. with dupl. addr";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	case RM6_DIRECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		s = "RM6_DIRECTED - sending directed beacons";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	case RM7_TRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		s = "RM7_TRACE - trace initiated";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		s = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	pr_debug("[rmt_state_change: %s]\n", s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) #endif				// DRIVERDEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }				// rmt_state_change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)  *	drv_reset_indication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)  *	This function is called by the SMT when it has detected a severe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)  *	hardware problem. The driver should perform a reset on the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)  *	as soon as possible, but not from within this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)  * Args
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)  *	smc - A pointer to the SMT context struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)  * Out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)  *	Nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)  ************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) void drv_reset_indication(struct s_smc *smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	pr_debug("entering drv_reset_indication\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	smc->os.ResetRequested = TRUE;	// Set flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }				// drv_reset_indication
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) static struct pci_driver skfddi_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	.name		= "skfddi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	.id_table	= skfddi_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	.probe		= skfp_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	.remove		= skfp_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) module_pci_driver(skfddi_pci_driver);