Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * smc911x.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Copyright (C) 2005 Sensoria Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *	   Derived from the unified SMC91x driver by Nicolas Pitre
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *	   and the smsc911x.c reference driver by SMSC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * Arguments:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *	 watchdog  = TX watchdog timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *	 tx_fifo_kb = Size of TX FIFO in KB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * History:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *	  04/16/05	Dustin McIntire		 Initial version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  */
/* Human-readable driver version banner. */
static const char version[] =
	 "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) /* Debugging options */
/*
 * Debugging options.
 *
 * Each ENABLE_SMC_DEBUG_* switch turns one debug category on (1) or
 * off (0) at compile time; the switches are folded into the SMC_DEBUG
 * bitmask below (one bit per category), which DBG() tests at runtime.
 */
#define ENABLE_SMC_DEBUG_RX		0
#define ENABLE_SMC_DEBUG_TX		0
#define ENABLE_SMC_DEBUG_DMA		0
#define ENABLE_SMC_DEBUG_PKTS		0
#define ENABLE_SMC_DEBUG_MISC		0
#define ENABLE_SMC_DEBUG_FUNC		0

/* One bit per debug category, derived from the switches above. */
#define SMC_DEBUG_RX		((ENABLE_SMC_DEBUG_RX	? 1 : 0) << 0)
#define SMC_DEBUG_TX		((ENABLE_SMC_DEBUG_TX	? 1 : 0) << 1)
#define SMC_DEBUG_DMA		((ENABLE_SMC_DEBUG_DMA	? 1 : 0) << 2)
#define SMC_DEBUG_PKTS		((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
#define SMC_DEBUG_MISC		((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
#define SMC_DEBUG_FUNC		((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)

#ifndef SMC_DEBUG
/* Default debug mask: OR of all categories (all zero unless enabled above).
 * May be overridden externally by defining SMC_DEBUG before this point. */
#define SMC_DEBUG	 ( SMC_DEBUG_RX	  | \
			   SMC_DEBUG_TX	  | \
			   SMC_DEBUG_DMA  | \
			   SMC_DEBUG_PKTS | \
			   SMC_DEBUG_MISC | \
			   SMC_DEBUG_FUNC   \
			 )
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #include "smc911x.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
/*
 * Module parameters (perms 0400: visible in sysfs, settable only at load).
 *
 * watchdog: transmit timeout in milliseconds, default 5 seconds.
 */
static int watchdog = 5000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");

/* tx_fifo_kb: TX FIFO size in KB; low nibble is written into HW_CFG
 * by smc911x_reset() (see (lp->tx_fifo_kb & 0xF) << 16 there). */
static int tx_fifo_kb=8;
module_param(tx_fifo_kb, int, 0400);
MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc911x");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85)  * The internal workings of the driver.  If you are changing anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86)  * here with the SMC stuff, you should have the datasheet and know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87)  * what you are doing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88)  */
/* Name used in log messages and resource registration. */
#define CARDNAME "smc911x"

/*
 * Use power-down feature of the chip
 */
#define POWER_DOWN		 1

#if SMC_DEBUG > 0
/* DBG(n, dev, fmt...): emit a netdev_dbg message when any category bit
 * in 'n' is set in the compile-time SMC_DEBUG mask; no-op otherwise. */
#define DBG(n, dev, args...)			 \
	do {					 \
		if (SMC_DEBUG & (n))		 \
			netdev_dbg(dev, args);	 \
	} while (0)

/* With debugging on, PRINTK is promoted to info level so it is visible. */
#define PRINTK(dev, args...)   netdev_info(dev, args)
#else
/* Debugging off: DBG compiles away; PRINTK drops to debug level. */
#define DBG(n, dev, args...)   do { } while (0)
#define PRINTK(dev, args...)   netdev_dbg(dev, args)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
#if SMC_DEBUG_PKTS > 0
/*
 * PRINT_PKT - hex-dump the first 'length' bytes of a packet buffer.
 *
 * Output is rows of 16 bytes (eight "xxyy " byte pairs) at KERN_DEBUG,
 * continued with pr_cont(), followed by one partial row if 'length' is
 * not a multiple of 16.
 *
 * Fixes vs. the previous version:
 *  - an odd trailing byte is now printed (the old pair loop ran
 *    remainder/2 times and silently dropped the last byte);
 *  - no empty KERN_DEBUG line is emitted when remainder == 0.
 */
static void PRINT_PKT(u_char *buf, int length)
{
	int i;
	int remainder;
	int lines;

	lines = length / 16;
	remainder = length % 16;

	for (i = 0; i < lines; i++) {
		int cur;

		printk(KERN_DEBUG);
		for (cur = 0; cur < 8; cur++) {
			u_char a, b;

			a = *buf++;
			b = *buf++;
			pr_cont("%02x%02x ", a, b);
		}
		pr_cont("\n");
	}

	if (remainder) {
		printk(KERN_DEBUG);
		for (i = 0; i < remainder / 2; i++) {
			u_char a, b;

			a = *buf++;
			b = *buf++;
			pr_cont("%02x%02x ", a, b);
		}
		/* Odd byte count: print the final unpaired byte. */
		if (remainder & 1)
			pr_cont("%02x", *buf);
		pr_cont("\n");
	}
}
#else
#define PRINT_PKT(x...)  do { } while (0)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 
/* this enables an interrupt in the interrupt mask register.
 * Read-modify-write of INT_EN — not atomic, so the caller must hold
 * lp->lock (smc911x_enable() below calls this with the lock held). */
#define SMC_ENABLE_INT(lp, x) do {			\
	unsigned int  __mask;				\
	__mask = SMC_GET_INT_EN((lp));			\
	__mask |= (x);					\
	SMC_SET_INT_EN((lp), __mask);			\
} while (0)

/* this disables an interrupt from the interrupt mask register.
 * Same non-atomic read-modify-write; caller must hold lp->lock. */
#define SMC_DISABLE_INT(lp, x) do {			\
	unsigned int  __mask;				\
	__mask = SMC_GET_INT_EN((lp));			\
	__mask &= ~(x);					\
	SMC_SET_INT_EN((lp), __mask);			\
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
/*
 * smc911x_reset - soft reset the chip and restore a baseline configuration.
 *
 * Sequence: wake the chip from power-down if needed, mask interrupts,
 * issue a soft reset (retrying if the chip flags a reset timeout), wait
 * for the EEPROM load to finish, then reprogram FIFO/flow-control,
 * GPIO/LED and IRQ-pin configuration.  Any pending TX skb is dropped
 * and counted as a TX error.  Returns early (with a PRINTK) on any
 * polling timeout.
 */
static void smc911x_reset(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int reg, timeout=0, resets=1, irq_cfg;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	/*	 Take out of PM setting first */
	if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
		/* Write to the bytetest will take out of powerdown */
		SMC_SET_BYTE_TEST(lp, 0);
		timeout=10;
		/* Poll the READY bit, 10us per try, up to ~100us total. */
		do {
			udelay(10);
			reg = SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_;
		} while (--timeout && !reg);
		if (timeout == 0) {
			PRINTK(dev, "smc911x_reset timeout waiting for PM restore\n");
			return;
		}
	}

	/* Disable all interrupts */
	spin_lock_irqsave(&lp->lock, flags);
	SMC_SET_INT_EN(lp, 0);
	spin_unlock_irqrestore(&lp->lock, flags);

	/* Soft reset; each pass polls for SRST to self-clear.  A chip-
	 * reported reset timeout (SRST_TO) bumps 'resets' for another
	 * full attempt.  NOTE(review): there is no cap on retries here,
	 * so a chip that keeps reporting SRST_TO would loop indefinitely. */
	while (resets--) {
		SMC_SET_HW_CFG(lp, HW_CFG_SRST_);
		timeout=10;
		do {
			udelay(10);
			reg = SMC_GET_HW_CFG(lp);
			/* If chip indicates reset timeout then try again */
			if (reg & HW_CFG_SRST_TO_) {
				PRINTK(dev, "chip reset timeout, retrying...\n");
				resets++;
				break;
			}
		} while (--timeout && (reg & HW_CFG_SRST_));
	}
	/* 'timeout' here is from the final loop iteration above. */
	if (timeout == 0) {
		PRINTK(dev, "smc911x_reset timeout waiting for reset\n");
		return;
	}

	/* make sure EEPROM has finished loading before setting GPIO_CFG */
	timeout=1000;
	while (--timeout && (SMC_GET_E2P_CMD(lp) & E2P_CMD_EPC_BUSY_))
		udelay(10);

	if (timeout == 0){
		PRINTK(dev, "smc911x_reset timeout waiting for EEPROM busy\n");
		return;
	}

	/* Initialize interrupts: mask everything, ack any stale status. */
	SMC_SET_INT_EN(lp, 0);
	SMC_ACK_INT(lp, -1);

	/* Reset the FIFO level and flow control settings.
	 * Low nibble of tx_fifo_kb selects the TX FIFO size in HW_CFG. */
	SMC_SET_HW_CFG(lp, (lp->tx_fifo_kb & 0xF) << 16);
//TODO: Figure out what appropriate pause time is
	SMC_SET_FLOW(lp, FLOW_FCPT_ | FLOW_FCEN_);
	SMC_SET_AFC_CFG(lp, lp->afc_cfg);


	/* Set to LED outputs */
	SMC_SET_GPIO_CFG(lp, 0x70070000);

	/*
	 * Deassert IRQ for 1*10us for edge type interrupts
	 * and drive IRQ pin push-pull
	 */
	irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
#ifdef SMC_DYNAMIC_BUS_CONFIG
	if (lp->cfg.irq_polarity)
		irq_cfg |= INT_CFG_IRQ_POL_;
#endif
	SMC_SET_IRQ_CFG(lp, irq_cfg);

	/* clear anything saved: a TX skb queued before the reset can no
	 * longer be sent, so free it and account it as an aborted error. */
	if (lp->pending_tx_skb != NULL) {
		dev_kfree_skb (lp->pending_tx_skb);
		lp->pending_tx_skb = NULL;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
/*
 * smc911x_enable - program the MAC address, then enable TX, RX and
 * interrupts.  The whole register sequence runs under lp->lock with
 * IRQs disabled, as required by SMC_ENABLE_INT()'s read-modify-write.
 */
static void smc911x_enable(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned mask, cfg, cr;
	unsigned long flags;

	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);

	spin_lock_irqsave(&lp->lock, flags);

	SMC_SET_MAC_ADDR(lp, dev->dev_addr);

	/* Enable TX: keep the TX FIFO size bits and low 12 bits of
	 * HW_CFG, and turn on store-and-forward mode. */
	cfg = SMC_GET_HW_CFG(lp);
	cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
	cfg |= HW_CFG_SF_;
	SMC_SET_HW_CFG(lp, cfg);
	SMC_SET_FIFO_TDA(lp, 0xFF);
	/* Raise the TX status interrupt after 64 status entries, with the
	 * general-purpose timer armed as a periodic fallback.
	 * NOTE(review): the 10000 timer count presumably gives ~1s —
	 * confirm against the GPT clock in the datasheet. */
	SMC_SET_FIFO_TSL(lp, 64);
	SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);

	/* Turn on the MAC transmitter and disable heartbeat checking. */
	SMC_GET_MAC_CR(lp, cr);
	cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
	SMC_SET_MAC_CR(lp, cr);
	SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);

	/* Add 2 byte padding to start of packets (aligns the IP header). */
	SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);

	/* Turn on receiver and enable RX */
	if (cr & MAC_CR_RXEN_)
		DBG(SMC_DEBUG_RX, dev, "Receiver already enabled\n");

	SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);

	/* Interrupt on every received packet (RX status level = 0). */
	SMC_SET_FIFO_RSA(lp, 0x01);
	SMC_SET_FIFO_RSL(lp, 0x00);

	/* now, enable interrupts */
	mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
		INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
		INT_EN_PHY_INT_EN_;
	/* Rev A chips signal RX FIFO problems via RDFL, later revs via RDFO. */
	if (IS_REV_A(lp->revision))
		mask|=INT_EN_RDFL_EN_;
	else {
		mask|=INT_EN_RDFO_EN_;
	}
	SMC_ENABLE_INT(lp, mask);

	spin_unlock_irqrestore(&lp->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312)  * this puts the device in an inactive state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) static void smc911x_shutdown(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	unsigned cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	DBG(SMC_DEBUG_FUNC, dev, "%s: --> %s\n", CARDNAME, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	/* Disable IRQ's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	SMC_SET_INT_EN(lp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	/* Turn of Rx and TX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	SMC_GET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	SMC_SET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	SMC_SET_TX_CFG(lp, TX_CFG_STOP_TX_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 
/*
 * smc911x_drop_pkt - discard the packet at the head of the RX data FIFO.
 *
 * For tiny residues (<= 4 words) the data is popped by hand; otherwise
 * the chip's RX fast-forward feature skips the packet, polled for up to
 * ~500us.  Called for bad packets and on allocation failure in
 * smc911x_rcv().
 */
static inline void smc911x_drop_pkt(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int fifo_count, timeout, reg;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "%s: --> %s\n",
	    CARDNAME, __func__);
	/* Low 16 bits of RX_FIFO_INF = bytes used in the RX data FIFO. */
	fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
	if (fifo_count <= 4) {
		/* Manually dump the packet data */
		while (fifo_count--)
			SMC_GET_RX_FIFO(lp);
	} else	 {
		/* Fast forward through the bad packet */
		SMC_SET_RX_DP_CTRL(lp, RX_DP_CTRL_FFWD_BUSY_);
		timeout=50;
		/* Wait for the FFWD busy bit to clear, 10us per poll. */
		do {
			udelay(10);
			reg = SMC_GET_RX_DP_CTRL(lp) & RX_DP_CTRL_FFWD_BUSY_;
		} while (--timeout && reg);
		if (timeout == 0) {
			PRINTK(dev, "timeout waiting for RX fast forward\n");
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 
/*
 * This is the procedure to handle the receipt of a packet.
 * It should be called after checking for packet presence in
 * the RX status FIFO.	 It must be called with the spin lock
 * already held.
 *
 * Bad packets (error status set) are counted and flushed via
 * smc911x_drop_pkt().  Good packets are copied out of the RX data
 * FIFO into a fresh skb; with SMC_USE_DMA the pull is started here
 * and completion is handled by the DMA RX interrupt, otherwise the
 * packet is pulled by PIO and handed to netif_rx() immediately.
 */
static inline void	 smc911x_rcv(struct net_device *dev)
{
	struct smc911x_local *lp = netdev_priv(dev);
	unsigned int pkt_len, status;
	struct sk_buff *skb;
	unsigned char *data;

	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, dev, "--> %s\n",
	    __func__);
	status = SMC_GET_RX_STS_FIFO(lp);
	DBG(SMC_DEBUG_RX, dev, "Rx pkt len %d status 0x%08x\n",
	    (status & 0x3fff0000) >> 16, status & 0xc000ffff);
	/* Packet length lives in bits [29:16] of the status word. */
	pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
	if (status & RX_STS_ES_) {
		/* Deal with a bad packet */
		dev->stats.rx_errors++;
		if (status & RX_STS_CRC_ERR_)
			dev->stats.rx_crc_errors++;
		else {
			if (status & RX_STS_LEN_ERR_)
				dev->stats.rx_length_errors++;
			if (status & RX_STS_MCAST_)
				dev->stats.multicast++;
		}
		/* Remove the bad packet data from the RX FIFO */
		smc911x_drop_pkt(dev);
	} else {
		/* Receive a valid packet */
		/* Alloc a buffer with extra room for DMA alignment */
		skb = netdev_alloc_skb(dev, pkt_len+32);
		if (unlikely(skb == NULL)) {
			PRINTK(dev, "Low memory, rcvd packet dropped.\n");
			dev->stats.rx_dropped++;
			/* Still must flush the data words from the FIFO. */
			smc911x_drop_pkt(dev);
			return;
		}
		/* Align IP header to 32 bits
		 * Note that the device is configured to add a 2
		 * byte padding to the packet start, so we really
		 * want to write to the orignal data pointer */
		data = skb->data;
		skb_reserve(skb, 2);
		/* pkt_len includes the 4-byte FCS, which is not delivered. */
		skb_put(skb,pkt_len-4);
#ifdef SMC_USE_DMA
		{
		unsigned int fifo;
		/* Lower the FIFO threshold if possible */
		fifo = SMC_GET_FIFO_INT(lp);
		if (fifo & 0xFF) fifo--;
		DBG(SMC_DEBUG_RX, dev, "Setting RX stat FIFO threshold to %d\n",
		    fifo & 0xff);
		SMC_SET_FIFO_INT(lp, fifo);
		/* Setup RX DMA: 16-byte end alignment, 2-byte RX offset;
		 * transfer length rounded up to a 16-byte multiple. */
		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
		lp->rxdma_active = 1;
		lp->current_rx_skb = skb;
		SMC_PULL_DATA(lp, data, (pkt_len+2+15) & ~15);
		/* Packet processing deferred to DMA RX interrupt */
		}
#else
		/* PIO path: 4-byte end alignment, pull padding + packet. */
		SMC_SET_RX_CFG(lp, RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
		SMC_PULL_DATA(lp, data, pkt_len+2+3);

		DBG(SMC_DEBUG_PKTS, dev, "Received packet\n");
		PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len-4;
#endif
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440)  * This is called to actually send a packet to the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) static void smc911x_hardware_send_pkt(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	unsigned int cmdA, cmdB, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	BUG_ON(lp->pending_tx_skb == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	skb = lp->pending_tx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	lp->pending_tx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	/* cmdA {25:24] data alignment [20:16] start offset [10:0] buffer length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	/* cmdB {31:16] pkt tag [10:0] length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	/* 16 byte buffer alignment mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	buf = (char*)((u32)(skb->data) & ~0xF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 			skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	buf = (char*)((u32)skb->data & ~0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	cmdA = (((u32)skb->data & 0x3) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 			skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	/* tag is packet length so we can use this in stats update later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	cmdB = (skb->len  << 16) | (skb->len & 0x7FF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	DBG(SMC_DEBUG_TX, dev, "TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	    len, len, buf, cmdA, cmdB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	SMC_SET_TX_FIFO(lp, cmdA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	SMC_SET_TX_FIFO(lp, cmdB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	DBG(SMC_DEBUG_PKTS, dev, "Transmitted packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	PRINT_PKT(buf, len <= 64 ? len : 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	/* Send pkt via PIO or DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	lp->current_tx_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	SMC_PUSH_DATA(lp, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	/* DMA complete IRQ will free buffer and set jiffies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	SMC_PUSH_DATA(lp, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	if (!lp->tx_throttle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499)  * Since I am not sure if I will have enough room in the chip's ram
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500)  * to store the packet, I call this routine which either sends it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501)  * now, or set the card to generates an interrupt when ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502)  * for the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	unsigned int free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	    __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	BUG_ON(lp->pending_tx_skb != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	DBG(SMC_DEBUG_TX, dev, "TX free space %d\n", free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	/* Turn off the flow when running out of space in FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		DBG(SMC_DEBUG_TX, dev, "Disabling data flow due to low FIFO space (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		    free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		/* Reenable when at least 1 packet of size MTU present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		lp->tx_throttle = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	/* Drop packets when we run out of space in TX FIFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	 * Account for overhead required for:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	 *	  Tx command words			 8 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	 *	  Start offset				 15 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	 *	  End padding				 15 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	if (unlikely(free < (skb->len + 8 + 15 + 15))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 		netdev_warn(dev, "No Tx free space %d < %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 			    free, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		lp->pending_tx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 		/* If the DMA is already running then defer this packet Tx until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		 * the DMA IRQ starts it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		if (lp->txdma_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Tx DMA running, deferring packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 			lp->pending_tx_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 			netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 			return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "Activating Tx DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			lp->txdma_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	lp->pending_tx_skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	smc911x_hardware_send_pkt(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  * This handles a TX status interrupt, which is only called when:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  * - a TX error occurred, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  * - TX of a packet completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) static void smc911x_tx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	unsigned int tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, dev, "--> %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	    __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	/* Collect the TX status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		DBG(SMC_DEBUG_TX, dev, "Tx stat FIFO used 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		    (SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		tx_status = SMC_GET_TX_STS_FIFO(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		dev->stats.tx_bytes+=tx_status>>16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		DBG(SMC_DEBUG_TX, dev, "Tx FIFO tag 0x%04x status 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		    (tx_status & 0xffff0000) >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		    tx_status & 0x0000ffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		/* count Tx errors, but ignore lost carrier errors when in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 		 * full-duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		    !(tx_status & 0x00000306))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 			dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		if (tx_status & TX_STS_MANY_COLL_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			dev->stats.collisions+=16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 			dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			dev->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		/* carrier error only has meaning for half-duplex communication */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		    !lp->ctl_rfduplx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 		if (tx_status & TX_STS_LATE_COLL_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 			dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623)  * Reads a register from the MII Management serial interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	unsigned int phydata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	SMC_GET_MII(lp, phyreg, phyaddr, phydata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	    __func__, phyaddr, phyreg, phydata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return phydata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * Writes a register to the MII Management serial interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			int phydata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	DBG(SMC_DEBUG_MISC, dev, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	    __func__, phyaddr, phyreg, phydata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	SMC_SET_MII(lp, phyreg, phyaddr, phydata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654)  * Finds and reports the PHY address (115 and 117 have external
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655)  * PHY interface 118 has internal only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) static void smc911x_phy_detect(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	int phyaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	unsigned int cfg, id1, id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	lp->phy_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	 * Scan all 32 PHY addresses if necessary, starting at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	 * PHY#1 to PHY#31, and then PHY#0 last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	switch(lp->version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		case CHIP_9115:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		case CHIP_9117:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		case CHIP_9215:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		case CHIP_9217:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 			cfg = SMC_GET_HW_CFG(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 			if (cfg & HW_CFG_EXT_PHY_DET_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 				cfg &= ~HW_CFG_PHY_CLK_SEL_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 				cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 				SMC_SET_HW_CFG(lp, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 				udelay(10); /* Wait for clocks to stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 				cfg |= HW_CFG_EXT_PHY_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 				SMC_SET_HW_CFG(lp, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 				udelay(10); /* Wait for clocks to stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 				cfg &= ~HW_CFG_PHY_CLK_SEL_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 				cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 				SMC_SET_HW_CFG(lp, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 				udelay(10); /* Wait for clocks to stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 				cfg |= HW_CFG_SMI_SEL_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 				SMC_SET_HW_CFG(lp, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 				for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 					/* Read the PHY identifiers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 					SMC_GET_PHY_ID1(lp, phyaddr & 31, id1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 					SMC_GET_PHY_ID2(lp, phyaddr & 31, id2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 					/* Make sure it is a valid identifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 					if (id1 != 0x0000 && id1 != 0xffff &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 					    id1 != 0x8000 && id2 != 0x0000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 					    id2 != 0xffff && id2 != 0x8000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 						/* Save the PHY's address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 						lp->mii.phy_id = phyaddr & 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 						lp->phy_type = id1 << 16 | id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 				if (phyaddr < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 					/* Found an external PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 			/* Internal media only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			SMC_GET_PHY_ID1(lp, 1, id1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			SMC_GET_PHY_ID2(lp, 1, id2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			/* Save the PHY's address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			lp->mii.phy_id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			lp->phy_type = id1 << 16 | id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	    id1, id2, lp->mii.phy_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730)  * Sets the PHY to a configuration as determined by the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  * Called with spin_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) static int smc911x_phy_fixed(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	int phyaddr = lp->mii.phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	int bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	/* Enter Link Disable state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	bmcr |= BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	 * Set our fixed capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	 * Disable auto-negotiation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	bmcr &= ~BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	if (lp->ctl_rfduplx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (lp->ctl_rspeed == 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		bmcr |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	/* Write our capabilities to the phy control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	/* Re-Configure the Receive/Phy Control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	bmcr &= ~BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  * smc911x_phy_reset - reset the phy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769)  * @dev: net device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770)  * @phy: phy address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  * Issue a software reset for the specified PHY and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  * wait up to 100ms for the reset to complete.	 We should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  * not access the PHY for 50ms after issuing the reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * The time to wait appears to be dependent on the PHY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) static int smc911x_phy_reset(struct net_device *dev, int phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	unsigned int reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	reg = SMC_GET_PMT_CTRL(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	reg &= ~0xfffff030;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	reg |= PMT_CTRL_PHY_RST_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	SMC_SET_PMT_CTRL(lp, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	for (timeout = 2; timeout; timeout--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		msleep(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		reg = SMC_GET_PMT_CTRL(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		if (!(reg & PMT_CTRL_PHY_RST_)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			/* extra delay required because the phy may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			 * not be completed with its reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			 * when PHY_BCR_RESET_ is cleared. 256us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			 * should suffice, but use 500us to be safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			udelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return reg & PMT_CTRL_PHY_RST_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * smc911x_phy_powerdown - powerdown phy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  * @dev: net device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * @phy: phy address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * Power down the specified PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static void smc911x_phy_powerdown(struct net_device *dev, int phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	unsigned int bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/* Enter Link Disable state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	SMC_GET_PHY_BMCR(lp, phy, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	bmcr |= BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	SMC_SET_PHY_BMCR(lp, phy, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832)  * smc911x_phy_check_media - check the media status and adjust BMCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833)  * @dev: net device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * @init: set true for initialisation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * Select duplex mode depending on negotiation state.	This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * also updates our carrier state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) static void smc911x_phy_check_media(struct net_device *dev, int init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	int phyaddr = lp->mii.phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	unsigned int bmcr, cr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		/* duplex state has changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		SMC_GET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		if (lp->mii.full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			DBG(SMC_DEBUG_MISC, dev, "Configuring for full-duplex mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			cr |= MAC_CR_RCVOWN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			DBG(SMC_DEBUG_MISC, dev, "Configuring for half-duplex mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 			bmcr &= ~BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			cr &= ~MAC_CR_RCVOWN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		SMC_SET_PHY_BMCR(lp, phyaddr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		SMC_SET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * Configures the specified PHY through the MII management interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * using Autonegotiation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * Calls smc911x_phy_fixed() if the user has requested a certain config.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * If RPC ANEG bit is set, the media selection is dependent purely on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * the selection by the MII (either in the MII BMCR reg or the result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * of autonegotiation.)  If the RPC ANEG bit is cleared, the selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * is controlled by the RPC SPEED and RPC DPLX bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static void smc911x_phy_configure(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct smc911x_local *lp = container_of(work, struct smc911x_local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 						phy_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct net_device *dev = lp->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int phyaddr = lp->mii.phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	int my_phy_caps; /* My PHY capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	int my_ad_caps; /* My Advertised capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	 * We should not be called if phy_type is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (lp->phy_type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	if (smc911x_phy_reset(dev, phyaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		netdev_info(dev, "PHY reset timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	 * Enable PHY Interrupts (for register 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 * Interrupts listed here are enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	SMC_SET_PHY_INT_MASK(lp, phyaddr, PHY_INT_MASK_ENERGY_ON_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		 PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		 PHY_INT_MASK_LINK_DOWN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	/* If the user requested no auto neg, then go set his request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (lp->mii.force_media) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		smc911x_phy_fixed(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		goto smc911x_phy_configure_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	SMC_GET_PHY_BMSR(lp, phyaddr, my_phy_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		netdev_info(dev, "Auto negotiation NOT supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		smc911x_phy_fixed(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		goto smc911x_phy_configure_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	/* CSMA capable w/ both pauses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (my_phy_caps & BMSR_100BASE4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		my_ad_caps |= ADVERTISE_100BASE4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (my_phy_caps & BMSR_100FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		my_ad_caps |= ADVERTISE_100FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	if (my_phy_caps & BMSR_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		my_ad_caps |= ADVERTISE_100HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (my_phy_caps & BMSR_10FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		my_ad_caps |= ADVERTISE_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	if (my_phy_caps & BMSR_10HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		my_ad_caps |= ADVERTISE_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	/* Disable capabilities not selected by our user */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	if (lp->ctl_rspeed != 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	if (!lp->ctl_rfduplx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	/* Update our Auto-Neg Advertisement Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	SMC_SET_PHY_MII_ADV(lp, phyaddr, my_ad_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	lp->mii.advertising = my_ad_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 * Read the register back.	 Without this, it appears that when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 * auto-negotiation is restarted, sometimes it isn't ready and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	 * the link does not come up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	SMC_GET_PHY_MII_ADV(lp, phyaddr, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	DBG(SMC_DEBUG_MISC, dev, "phy caps=0x%04x\n", my_phy_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	DBG(SMC_DEBUG_MISC, dev, "phy advertised caps=0x%04x\n", my_ad_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	/* Restart auto-negotiation process in order to advertise my caps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	SMC_SET_PHY_BMCR(lp, phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	smc911x_phy_check_media(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) smc911x_phy_configure_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  * smc911x_phy_interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * Purpose:  Handle interrupts relating to PHY register 18. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  *	 called from the "hard" interrupt handler under our private spinlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) static void smc911x_phy_interrupt(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	int phyaddr = lp->mii.phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (lp->phy_type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	smc911x_phy_check_media(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	/* read to clear status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	SMC_GET_PHY_INT_SRC(lp, phyaddr,status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	DBG(SMC_DEBUG_MISC, dev, "PHY interrupt status 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	    status & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	DBG(SMC_DEBUG_MISC, dev, "AFC_CFG 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	    SMC_GET_AFC_CFG(lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) /*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * This is the main routine of the driver, to handle the device when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * it needs some attention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	unsigned int status, mask, timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	unsigned int rx_overrun=0, cr, pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	/* Spurious interrupt check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if ((SMC_GET_IRQ_CFG(lp) & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		(INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	mask = SMC_GET_INT_EN(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	SMC_SET_INT_EN(lp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	/* set a timeout value, so I don't stay here forever */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	timeout = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		status = SMC_GET_INT(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		DBG(SMC_DEBUG_MISC, dev, "INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		    status, mask, status & ~mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		status &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		/* Handle SW interrupt condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		if (status & INT_STS_SW_INT_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			SMC_ACK_INT(lp, INT_STS_SW_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			mask &= ~INT_EN_SW_INT_EN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		/* Handle various error conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		if (status & INT_STS_RXE_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			SMC_ACK_INT(lp, INT_STS_RXE_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (status & INT_STS_RXDFH_INT_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			SMC_ACK_INT(lp, INT_STS_RXDFH_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			dev->stats.rx_dropped+=SMC_GET_RX_DROP(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		/* Undocumented interrupt-what is the right thing to do here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		if (status & INT_STS_RXDF_INT_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			SMC_ACK_INT(lp, INT_STS_RXDF_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		/* Rx Data FIFO exceeds set level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		if (status & INT_STS_RDFL_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			if (IS_REV_A(lp->revision)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 				rx_overrun=1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 				SMC_GET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				cr &= ~MAC_CR_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 				SMC_SET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 				dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 				dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			SMC_ACK_INT(lp, INT_STS_RDFL_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		if (status & INT_STS_RDFO_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			if (!IS_REV_A(lp->revision)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				SMC_GET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 				cr &= ~MAC_CR_RXEN_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 				SMC_SET_MAC_CR(lp, cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				rx_overrun=1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 				DBG(SMC_DEBUG_RX, dev, "RX overrun\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 				dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			SMC_ACK_INT(lp, INT_STS_RDFO_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		/* Handle receive condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		if ((status & INT_STS_RSFL_) || rx_overrun) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			unsigned int fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			DBG(SMC_DEBUG_RX, dev, "RX irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			fifo = SMC_GET_RX_FIFO_INF(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			DBG(SMC_DEBUG_RX, dev, "Rx FIFO pkts %d, bytes %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			    pkts, fifo & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			if (pkts != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 				unsigned int fifo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 				if (lp->rxdma_active){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 					DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 					    "RX DMA active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 					/* The DMA is already running so up the IRQ threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 					fifo = SMC_GET_FIFO_INT(lp) & ~0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 					fifo |= pkts & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 					DBG(SMC_DEBUG_RX, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 					    "Setting RX stat FIFO threshold to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 					    fifo & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 					SMC_SET_FIFO_INT(lp, fifo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				smc911x_rcv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			SMC_ACK_INT(lp, INT_STS_RSFL_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		/* Handle transmit FIFO available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		if (status & INT_STS_TDFA_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			DBG(SMC_DEBUG_TX, dev, "TX data FIFO space available irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			SMC_SET_FIFO_TDA(lp, 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			lp->tx_throttle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			if (!lp->txdma_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			SMC_ACK_INT(lp, INT_STS_TDFA_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		/* Handle transmit done condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			    "Tx stat FIFO limit (%d) /GPT irq\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			    (SMC_GET_FIFO_INT(lp) & 0x00ff0000) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			smc911x_tx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			SMC_ACK_INT(lp, INT_STS_TSFL_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			SMC_ACK_INT(lp, INT_STS_TSFL_ | INT_STS_GPT_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		if (status & INT_STS_TSFL_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			DBG(SMC_DEBUG_TX, dev, "TX status FIFO limit (%d) irq\n", ?);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			smc911x_tx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			SMC_ACK_INT(lp, INT_STS_TSFL_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		if (status & INT_STS_GPT_INT_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			DBG(SMC_DEBUG_RX, dev, "IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			    SMC_GET_IRQ_CFG(lp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			    SMC_GET_FIFO_INT(lp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			    SMC_GET_RX_CFG(lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			DBG(SMC_DEBUG_RX, dev, "Rx Stat FIFO Used 0x%02x Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			    (SMC_GET_RX_FIFO_INF(lp) & 0x00ff0000) >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			    SMC_GET_RX_FIFO_INF(lp) & 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			    SMC_GET_RX_STS_FIFO_PEEK(lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			SMC_ACK_INT(lp, INT_STS_GPT_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		/* Handle PHY interrupt condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (status & INT_STS_PHY_INT_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			DBG(SMC_DEBUG_MISC, dev, "PHY irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			smc911x_phy_interrupt(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 			SMC_ACK_INT(lp, INT_STS_PHY_INT_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	} while (--timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/* restore mask state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	SMC_SET_INT_EN(lp, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	DBG(SMC_DEBUG_MISC, dev, "Interrupt done (%d loops)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	    8-timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) smc911x_tx_dma_irq(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	struct smc911x_local *lp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	struct net_device *dev = lp->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct sk_buff *skb = lp->current_tx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	BUG_ON(skb == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	netif_trans_update(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	lp->current_tx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	if (lp->pending_tx_skb != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		smc911x_hardware_send_pkt(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		    "No pending Tx packets. DMA disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		lp->txdma_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		if (!lp->tx_throttle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	    "TX DMA irq completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) smc911x_rx_dma_irq(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	struct smc911x_local *lp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	struct net_device *dev = lp->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	struct sk_buff *skb = lp->current_rx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	unsigned int pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	BUG_ON(skb == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	lp->current_rx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	PRINT_PKT(skb->data, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	dev->stats.rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	if (pkts != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		smc911x_rcv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		lp->rxdma_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	    "RX DMA irq completed. DMA RX FIFO PKTS %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	    pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) #endif	 /* SMC_USE_DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  * Polling receive - used by netconsole and other diagnostic tools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  * to allow network i/o with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static void smc911x_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	smc911x_interrupt(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* Our watchdog timed out. Called by the networking layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) static void smc911x_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	int status, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	status = SMC_GET_INT(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	mask = SMC_GET_INT_EN(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	DBG(SMC_DEBUG_MISC, dev, "INT 0x%02x MASK 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	    status, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	/* Dump the current TX FIFO contents and restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	mask = SMC_GET_TX_CFG(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	SMC_SET_TX_CFG(lp, mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	 * which calls schedule().	 Hence we use a work queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (lp->phy_type != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		schedule_work(&lp->phy_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	/* We can accept TX packets again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * This routine will, depending on the values passed to it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * either make it accept multicast packets, go into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  * promiscuous mode (for TCPDUMP and cousins) or accept
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * a select set of multicast packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static void smc911x_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	unsigned int multicast_table[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	unsigned int mcr, update_multicast = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	SMC_GET_MAC_CR(lp, mcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		DBG(SMC_DEBUG_MISC, dev, "RCR_PRMS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		mcr |= MAC_CR_PRMS_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 * Here, I am setting this to accept all multicast packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 * I don't need to zero the multicast table, because the flag is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 * checked before the table is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		DBG(SMC_DEBUG_MISC, dev, "RCR_ALMUL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		mcr |= MAC_CR_MCPAS_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	 * This sets the internal hardware table to filter out unwanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	 * multicast packets before they take up memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	 * address are the offset into the table.	If that bit is 1, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	 * multicast packet is accepted.  Otherwise, it's dropped silently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	 * To use the 6 bits as an offset into the table, the high 1 bit is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	 * the number of the 32 bit register, while the low 5 bits are the bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	 * within that register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	else if (!netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		/* Set the Hash perfec mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		mcr |= MAC_CR_HPFILT_;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		/* start with a table of all zeros: reject all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		memset(multicast_table, 0, sizeof(multicast_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			u32 position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			/* upper 6 bits are used as hash index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			position = ether_crc(ETH_ALEN, ha->addr)>>26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			multicast_table[position>>5] |= 1 << (position&0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		/* be sure I get rid of flags I might have set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		/* now, the table can be loaded into the chipset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		update_multicast = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	} else	 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		DBG(SMC_DEBUG_MISC, dev, "~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		 * since I'm disabling all multicast entirely, I need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		 * clear the multicast list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		memset(multicast_table, 0, sizeof(multicast_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		update_multicast = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	SMC_SET_MAC_CR(lp, mcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	if (update_multicast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		DBG(SMC_DEBUG_MISC, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		    "update mcast hash table 0x%08x 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		    multicast_table[0], multicast_table[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		SMC_SET_HASHL(lp, multicast_table[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		SMC_SET_HASHH(lp, multicast_table[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)  * Open and Initialize the board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)  * Set up everything, reset the card, etc..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) smc911x_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	/* reset the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	smc911x_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	/* Configure the PHY, initialize the link state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	smc911x_phy_configure(&lp->phy_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	/* Turn on Tx + Rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	smc911x_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * smc911x_close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * this makes the board clean up everything that it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  * and not talk to the outside world.	 Caused by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  * an 'ifconfig ethX down'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static int smc911x_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	/* clear everything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	smc911x_shutdown(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if (lp->phy_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		/* We need to ensure that no calls to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		 * smc911x_phy_configure are pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		cancel_work_sync(&lp->phy_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		smc911x_phy_powerdown(dev, lp->mii.phy_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	if (lp->pending_tx_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		dev_kfree_skb(lp->pending_tx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		lp->pending_tx_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * Ethtool support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) smc911x_ethtool_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				   struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	u32 supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (lp->phy_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		mii_ethtool_get_link_ksettings(&lp->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		supported = SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 				SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 				SUPPORTED_TP | SUPPORTED_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		if (lp->ctl_rspeed == 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			cmd->base.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		else if (lp->ctl_rspeed == 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			cmd->base.speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		cmd->base.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		cmd->base.port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		SMC_GET_PHY_SPECIAL(lp, lp->mii.phy_id, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		cmd->base.duplex =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 				DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		ethtool_convert_legacy_u32_to_link_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			cmd->link_modes.supported, supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) smc911x_ethtool_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 				   const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	if (lp->phy_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		if (cmd->base.autoneg != AUTONEG_DISABLE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		    cmd->base.speed != SPEED_10 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		    (cmd->base.duplex != DUPLEX_HALF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		     cmd->base.duplex != DUPLEX_FULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		    (cmd->base.port != PORT_TP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		     cmd->base.port != PORT_AUI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	strlcpy(info->driver, CARDNAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	strlcpy(info->version, version, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	strlcpy(info->bus_info, dev_name(dev->dev.parent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static int smc911x_ethtool_nwayreset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	if (lp->phy_type != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		ret = mii_nway_restart(&lp->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	return lp->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	lp->msg_enable = level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static int smc911x_ethtool_getregslen(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	/* System regs + MAC regs + PHY regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	return (((E2P_CMD - ID_REV)/4 + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 			(WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static void smc911x_ethtool_getregs(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 										 struct ethtool_regs* regs, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	u32 reg,i,j=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	u32 *data = (u32*)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	regs->version = lp->version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	for(i=ID_REV;i<=E2P_CMD;i+=4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		data[j++] = SMC_inl(lp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	for(i=MAC_CR;i<=WUCSR;i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		SMC_GET_MAC_CSR(lp, i, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		data[j++] = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	for(i=0;i<=31;i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		SMC_GET_MII(lp, i, lp->mii.phy_id, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		data[j++] = reg & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	unsigned int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	int e2p_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	e2p_cmd = SMC_GET_E2P_CMD(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			PRINTK(dev, "%s timeout waiting for EEPROM to respond\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		e2p_cmd = SMC_GET_E2P_CMD(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	if (timeout == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		PRINTK(dev, "%s timeout waiting for EEPROM CMD not busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		       __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 													int cmd, int addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	SMC_SET_E2P_CMD(lp, E2P_CMD_EPC_BUSY_ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		((cmd) & (0x7<<28)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		((addr) & 0xFF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 													u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	*data = SMC_GET_E2P_DATA(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 													 u8 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	SMC_SET_E2P_DATA(lp, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) static int smc911x_ethtool_geteeprom(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 									  struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	u8 eebuf[SMC911X_EEPROM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	for(i=0;i<SMC911X_EEPROM_LEN;i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	memcpy(data, eebuf+eeprom->offset, eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) static int smc911x_ethtool_seteeprom(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 									   struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	/* Enable erase */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		/* erase byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		/* write byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			 return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	 return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	 return SMC911X_EEPROM_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) static const struct ethtool_ops smc911x_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	.get_drvinfo	 = smc911x_ethtool_getdrvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	.get_msglevel	 = smc911x_ethtool_getmsglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	.set_msglevel	 = smc911x_ethtool_setmsglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	.nway_reset = smc911x_ethtool_nwayreset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	.get_link	 = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	.get_regs_len	 = smc911x_ethtool_getregslen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	.get_regs	 = smc911x_ethtool_getregs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	.get_eeprom_len = smc911x_ethtool_geteeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	.get_eeprom = smc911x_ethtool_geteeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	.set_eeprom = smc911x_ethtool_seteeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	.get_link_ksettings	 = smc911x_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	.set_link_ksettings	 = smc911x_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * smc911x_findirq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  * This routine has a simple purpose -- make the SMC chip generate an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  * interrupt, so an auto-detect routine can detect it, and find the IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) static int smc911x_findirq(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	int timeout = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	unsigned long cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	cookie = probe_irq_on();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	 * Force a SW interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	SMC_SET_INT_EN(lp, INT_EN_SW_INT_EN_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	 * Wait until positive that the interrupt has been generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		int int_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		int_status = SMC_GET_INT_EN(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		if (int_status & INT_EN_SW_INT_EN_)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			 break;		/* got the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	} while (--timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	 * there is really nothing that I can do here if timeout fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	 * as autoirq_report will return a 0 anyway, which is what I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	 * want in this case.	 Plus, the clean up is needed in both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	 * cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	/* and disable all interrupts again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	SMC_SET_INT_EN(lp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	/* and return what I found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	return probe_irq_off(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static const struct net_device_ops smc911x_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	.ndo_open		= smc911x_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	.ndo_stop		= smc911x_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	.ndo_start_xmit		= smc911x_hard_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	.ndo_tx_timeout		= smc911x_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	.ndo_set_rx_mode	= smc911x_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	.ndo_set_mac_address	= eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	.ndo_poll_controller	= smc911x_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * Function: smc911x_probe(unsigned long ioaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * Purpose:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  *	 Tests to see if a given ioaddr points to an SMC911x chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  *	 Returns a 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  * Algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)  *	 (1) see if the endian word is OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  *	 (1) see if I recognize the chip ID in the appropriate register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)  * Here I do typical initialization tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  * o  Initialize the structure if needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)  * o  print out my vanity message if not done so already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)  * o  print out what type of hardware is detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)  * o  print out the ethernet address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)  * o  find the IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)  * o  set up my private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)  * o  configure the dev structure with my subroutines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)  * o  actually GRAB the irq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)  * o  GRAB the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static int smc911x_probe(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	struct smc911x_local *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	int i, retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	unsigned int val, chip_id, revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	const char *version_string;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	unsigned long irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	struct dma_slave_config	config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	/* First, see if the endian word is recognized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	val = SMC_GET_BYTE_TEST(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	DBG(SMC_DEBUG_MISC, dev, "%s: endian probe returned 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	    CARDNAME, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	if (val != 0x87654321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		netdev_err(dev, "Invalid chip endian 0x%08x\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	 * check if the revision register is something that I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	 * recognize.	These might need to be added to later,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	 * as future revisions could be added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	chip_id = SMC_GET_PN(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	DBG(SMC_DEBUG_MISC, dev, "%s: id probe returned 0x%04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	    CARDNAME, chip_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	for(i=0;chip_ids[i].id != 0; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		if (chip_ids[i].id == chip_id) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	if (!chip_ids[i].id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		netdev_err(dev, "Unknown chip ID %04x\n", chip_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	version_string = chip_ids[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	revision = SMC_GET_REV(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	DBG(SMC_DEBUG_MISC, dev, "%s: revision = 0x%04x\n", CARDNAME, revision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	/* At this point I'll assume that the chip is an SMC911x. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	DBG(SMC_DEBUG_MISC, dev, "%s: Found a %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	    CARDNAME, chip_ids[i].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	/* Validate the TX FIFO size requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		netdev_err(dev, "Invalid TX FIFO size requested %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 			   tx_fifo_kb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	/* fill in some of the fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	lp->version = chip_ids[i].id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	lp->revision = revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	lp->tx_fifo_kb = tx_fifo_kb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	/* Reverse calculate the RX FIFO size from the TX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	/* Set the automatic flow control values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	switch(lp->tx_fifo_kb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		 *	 AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		 *	 AFC_LO is AFC_HI/2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		 *	 BACK_DUR is about 5uS*(AFC_LO) rounded down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		case 2:/* 13440 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			lp->afc_cfg=0x008C46AF;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		case 3:/* 12480 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 			lp->afc_cfg=0x0082419F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		case 4:/* 11520 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			lp->afc_cfg=0x00783C9F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		case 5:/* 10560 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			lp->afc_cfg=0x006E374F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		case 6:/* 9600 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			lp->afc_cfg=0x0064328F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		case 7:/* 8640 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 			lp->afc_cfg=0x005A2D7F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		case 8:/* 7680 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			lp->afc_cfg=0x0050287F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		case 9:/* 6720 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			lp->afc_cfg=0x0046236F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		case 10:/* 5760 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			lp->afc_cfg=0x003C1E6F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		case 11:/* 4800 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			lp->afc_cfg=0x0032195F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		 *	 AFC_HI is ~1520 bytes less than RX Data Fifo Size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		 *	 AFC_LO is AFC_HI/2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		 *	 BACK_DUR is about 5uS*(AFC_LO) rounded down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		case 12:/* 3840 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 			lp->afc_cfg=0x0024124F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		case 13:/* 2880 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 			lp->afc_cfg=0x0015073F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		case 14:/* 1920 Rx Data Fifo Size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			lp->afc_cfg=0x0006032F;break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		 default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 			 PRINTK(dev, "ERROR -- no AFC_CFG setting found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			 break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	    "%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	    lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	/* Get the MAC address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	SMC_GET_MAC_ADDR(lp, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	/* now, reset the chip, and put it into a known state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	smc911x_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	 * If dev->irq is 0, then the device has to be banged on to see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	 * what the IRQ is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	 * Specifying an IRQ is done with the assumption that the user knows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	 * what (s)he is doing.  No checking is done!!!!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	if (dev->irq < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		int trials;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		trials = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		while (trials--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			dev->irq = smc911x_findirq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			if (dev->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			/* kick the card and try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 			smc911x_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	if (dev->irq == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		netdev_warn(dev, "Couldn't autodetect your IRQ. Use irq=xx.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		retval = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	dev->irq = irq_canonicalize(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	dev->netdev_ops = &smc911x_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	dev->ethtool_ops = &smc911x_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	INIT_WORK(&lp->phy_configure, smc911x_phy_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	lp->mii.phy_id_mask = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	lp->mii.reg_num_mask = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	lp->mii.force_media = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	lp->mii.full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	lp->mii.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	lp->mii.mdio_read = smc911x_phy_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	lp->mii.mdio_write = smc911x_phy_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	 * Locate the phy, if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	smc911x_phy_detect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	/* Set default parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	lp->msg_enable = NETIF_MSG_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	lp->ctl_rfduplx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	lp->ctl_rspeed = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) #ifdef SMC_DYNAMIC_BUS_CONFIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	irq_flags = lp->cfg.irq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	irq_flags = IRQF_SHARED | SMC_IRQ_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	/* Grab the IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	retval = request_irq(dev->irq, smc911x_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 			     irq_flags, dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	lp->rxdma = dma_request_channel(mask, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	lp->txdma = dma_request_channel(mask, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	lp->rxdma_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	lp->txdma_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	memset(&config, 0, sizeof(config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	config.src_addr = lp->physaddr + RX_DATA_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	config.dst_addr = lp->physaddr + TX_DATA_FIFO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	config.src_maxburst = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	config.dst_maxburst = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	retval = dmaengine_slave_config(lp->rxdma, &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		dev_err(lp->dev, "dma rx channel configuration failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	retval = dmaengine_slave_config(lp->txdma, &config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		dev_err(lp->dev, "dma tx channel configuration failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	retval = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (retval == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		/* now, print out the card info, in a short format.. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		netdev_info(dev, "%s (rev %d) at %#lx IRQ %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			    version_string, lp->revision,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			    dev->base_addr, dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		if (lp->rxdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			pr_cont(" RXDMA %p", lp->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		if (lp->txdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			pr_cont(" TXDMA %p", lp->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		if (!is_valid_ether_addr(dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 			netdev_warn(dev, "Invalid ethernet MAC address. Please set using ifconfig\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			/* Print the Ethernet address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			netdev_info(dev, "Ethernet addr: %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 				    dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		if (lp->phy_type == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			PRINTK(dev, "No PHY found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			PRINTK(dev, "LAN911x Internal PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			PRINTK(dev, "External PHY 0x%08x\n", lp->phy_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		if (lp->rxdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 			dma_release_channel(lp->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		if (lp->txdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			dma_release_channel(lp->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) /*
 * smc911x_drv_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  *	  Output:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  *	 0 --> there is a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  *	 anything else, error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) static int smc911x_drv_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	struct smc911x_local *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	void __iomem *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	/* ndev is not valid yet, so avoid passing it in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	DBG(SMC_DEBUG_FUNC, "--> %s\n",  __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	 * Request the regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		 ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		 goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	ndev = alloc_etherdev(sizeof(struct smc911x_local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	if (!ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		goto release_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	SET_NETDEV_DEV(ndev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	ndev->dma = (unsigned char)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	ndev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	if (ndev->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		ret = ndev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		goto release_both;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	lp = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	lp->netdev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) #ifdef SMC_DYNAMIC_BUS_CONFIG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		if (!pd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 			ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 			goto release_both;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		memcpy(&lp->cfg, pd, sizeof(lp->cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	addr = ioremap(res->start, SMC911X_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	if (!addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		goto release_both;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	platform_set_drvdata(pdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	lp->base = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	ndev->base_addr = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	ret = smc911x_probe(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		iounmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) release_both:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) release_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		release_mem_region(res->start, SMC911X_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		pr_info("%s: not found (%d).\n", CARDNAME, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		lp->physaddr = res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		lp->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) static int smc911x_drv_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	struct net_device *ndev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	struct smc911x_local *lp = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	unregister_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	free_irq(ndev->irq, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) #ifdef SMC_USE_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		if (lp->rxdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 			dma_release_channel(lp->rxdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		if (lp->txdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 			dma_release_channel(lp->txdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	iounmap(lp->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	release_mem_region(res->start, SMC911X_IO_EXTENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	free_netdev(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	struct net_device *ndev = platform_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	struct smc911x_local *lp = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	if (ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 			netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 			smc911x_shutdown(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) #if POWER_DOWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 			/* Set D2 - Energy detect only setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			SMC_SET_PMT_CTRL(lp, 2<<12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static int smc911x_drv_resume(struct platform_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	struct net_device *ndev = platform_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	DBG(SMC_DEBUG_FUNC, ndev, "--> %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (ndev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		struct smc911x_local *lp = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 			smc911x_reset(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 			if (lp->phy_type != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				smc911x_phy_configure(&lp->phy_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			smc911x_enable(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 			netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) static struct platform_driver smc911x_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	.probe		 = smc911x_drv_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	.remove	 = smc911x_drv_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	.suspend	 = smc911x_drv_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	.resume	 = smc911x_drv_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	.driver	 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		.name	 = CARDNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) module_platform_driver(smc911x_driver);