/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
        Written/copyright 1997-2001 by Donald Becker.

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This driver is for the SMC83c170/175 "EPIC" series, as used on the
        SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403

        Information and updates available at
        http://www.scyld.com/network/epic100.html
        [this link no longer provides anything useful -jgarzik]

        ---------------------------------------------------------------------

*/

#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;                   /* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8                     /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    256
#define TX_QUEUE_LEN    240             /* Limit ring entries actually used. */
#define RX_RING_SIZE    256
#define TX_TOTAL_SIZE   (TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE   (RX_RING_SIZE * sizeof(struct epic_rx_desc))
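/* With four u32 fields each, a descriptor is 16 bytes, so each ring above
 * occupies 256 * 16 = 4096 bytes (one page) of the coherent DMA memory
 * allocated in epic_init_one(). */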

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536            /* Size of each temporary Rx buffer. */

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH  256
#define RX_FIFO_THRESH  1               /* 0-3: 32, 64, 96, or 128 bytes. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-7)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
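
/* Example module load (illustrative values, not taken from the original
 * source):
 *
 *   modprobe epic100 debug=3 rx_copybreak=200 options=0x10,0 full_duplex=1,0
 *
 * This enables verbose logging, copies received frames shorter than 200
 * bytes into freshly allocated skbuffs, and forces full duplex on the
 * first card only. */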

/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controller for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers
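
In outline: the driver keeps two circular rings of 16-byte descriptors,
rx_ring and tx_ring, in coherent DMA memory.  cur_rx/cur_tx index the next
descriptor to hand to the chip and dirty_rx/dirty_tx the next one to
reclaim; ownership of each descriptor is arbitrated by the DescOwn status
bit (set: owned by the chip, clear: owned by the driver).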

IIIb. References

http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IIIc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

#ifdef USE_IO_OPS
#define EPIC_BAR        0
#else
#define EPIC_BAR        1
#endif

typedef enum {
        SMSC_83C170_0,
        SMSC_83C170,
        SMSC_83C175,
} chip_t;


struct epic_chip_info {
        const char *name;
        int drv_flags;                  /* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
        { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
        { "SMSC EPIC/100 83c170", TYPE2_INTR },
        { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
};


static const struct pci_device_id epic_pci_tbl[] = {
        { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
        { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
        { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
        { 0,}
};
MODULE_DEVICE_TABLE(pci, epic_pci_tbl);

#define ew16(reg, val)  iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)  iowrite32(val, ioaddr + (reg))
#define er8(reg)        ioread8(ioaddr + (reg))
#define er16(reg)       ioread16(ioaddr + (reg))
#define er32(reg)       ioread32(ioaddr + (reg))
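
/* Each function that uses these accessors is expected to provide a local
 * "void __iomem *ioaddr", normally initialized from ep->ioaddr, which the
 * macros reference implicitly. */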

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
        COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
        PCIBurstCnt=0x18,
        TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,  /* Rx error counters. */
        MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
        LAN0=64,                        /* MAC address. */
        MC0=80,                         /* Multicast filter table. */
        RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
        PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
        TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
        PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
        RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
        TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
        RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
        StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
        StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved     0xffffffff      /* Chip failed or removed (CardBus) */

#define EpicNapiEvent   (TxEmpty | TxDone | \
                         RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
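
/* The interrupt sources are split in two: the EpicNapiEvent bits are masked
 * off in the hard interrupt handler and serviced from epic_poll() under
 * NAPI (see epic_napi_irq_off()/epic_napi_irq_on() below), while the
 * remaining error/status events are handled directly in epic_interrupt(). */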

static const u16 media2miictl[16] = {
        0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0 };
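
/* Indexed by the low four bits of dev->if_port.  The nonzero entries are
 * MII BMCR values: 0x2000 selects 100 Mb/s, 0x0100 full duplex, 0x2100
 * both; 0x0C00 is isolate + power-down, presumably because the MII is
 * unused on the 10base2/AUI ports (entries 1 and 2). */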

/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
        u32 txstatus;
        u32 bufaddr;
        u32 buflength;
        u32 next;
};

struct epic_rx_desc {
        u32 rxstatus;
        u32 bufaddr;
        u32 buflength;
        u32 next;
};

enum desc_status_bits {
        DescOwn=0x8000,
};
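
/* DescOwn is bit 15 of the descriptor status word: while it is set the
 * descriptor belongs to the chip and the driver must not touch the other
 * fields; the chip clears it when it has finished with the descriptor. */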

#define PRIV_ALIGN      15              /* Required alignment mask */
struct epic_private {
        struct epic_rx_desc *rx_ring;
        struct epic_tx_desc *tx_ring;
        /* The saved address of a sent-in-place packet/buffer, for later dev_kfree_skb(). */
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        /* The addresses of receive-in-place skbuffs. */
        struct sk_buff *rx_skbuff[RX_RING_SIZE];

        dma_addr_t tx_ring_dma;
        dma_addr_t rx_ring_dma;

        /* Ring pointers. */
        spinlock_t lock;                /* Group with Tx control cache line. */
        spinlock_t napi_lock;
        struct napi_struct napi;
        unsigned int cur_tx, dirty_tx;

        unsigned int cur_rx, dirty_rx;
        u32 irq_mask;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */

        void __iomem *ioaddr;
        struct pci_dev *pci_dev;        /* PCI bus location. */
        int chip_id, chip_flags;

        struct timer_list timer;        /* Media selection timer. */
        int tx_threshold;
        unsigned char mc_filter[8];
        signed char phys[4];            /* MII device addresses. */
        u16 advertising;                /* NWay media advertisement */
        int mii_phy_cnt;
        u32 ethtool_ops_nesting;
        struct mii_if_info mii;
        unsigned int tx_full:1;         /* The Tx queue is full. */
        unsigned int default_port:4;    /* Last dev->if_port value. */
};
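
/* Locking (inferred from usage): ep->lock groups with the Tx control state
 * and serializes the Tx ring between the transmit path and the interrupt
 * handler, while ep->napi_lock guards the handoff of the EpicNapiEvent
 * interrupt sources between epic_interrupt() and epic_poll(). */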

static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
                                   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
        .ndo_open               = epic_open,
        .ndo_stop               = epic_close,
        .ndo_start_xmit         = epic_start_xmit,
        .ndo_tx_timeout         = epic_tx_timeout,
        .ndo_get_stats          = epic_get_stats,
        .ndo_set_rx_mode        = set_rx_mode,
        .ndo_do_ioctl           = netdev_ioctl,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int card_idx = -1;
        void __iomem *ioaddr;
        int chip_idx = (int) ent->driver_data;
        struct net_device *dev;
        struct epic_private *ep;
        int i, ret, option = 0, duplex = 0;
        void *ring_space;
        dma_addr_t ring_dma;

        /* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        pr_info_once("%s%s\n", version, version2);
#endif

        card_idx++;

        ret = pci_enable_device(pdev);
        if (ret)
                goto out;

        if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
                dev_err(&pdev->dev, "no PCI region space\n");
                ret = -ENODEV;
                goto err_out_disable;
        }

        pci_set_master(pdev);

        ret = pci_request_regions(pdev, DRV_NAME);
        if (ret < 0)
                goto err_out_disable;

        ret = -ENOMEM;

        dev = alloc_etherdev(sizeof(*ep));
        if (!dev)
                goto err_out_free_res;

        SET_NETDEV_DEV(dev, &pdev->dev);

        ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
        if (!ioaddr) {
                dev_err(&pdev->dev, "ioremap failed\n");
                goto err_out_free_netdev;
        }

        pci_set_drvdata(pdev, dev);
        ep = netdev_priv(dev);
        ep->ioaddr = ioaddr;
        ep->mii.dev = dev;
        ep->mii.mdio_read = mdio_read;
        ep->mii.mdio_write = mdio_write;
        ep->mii.phy_id_mask = 0x1f;
        ep->mii.reg_num_mask = 0x1f;

        ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
                                        GFP_KERNEL);
        if (!ring_space)
                goto err_out_iounmap;
        ep->tx_ring = ring_space;
        ep->tx_ring_dma = ring_dma;

        ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
                                        GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_tx;
        ep->rx_ring = ring_space;
        ep->rx_ring_dma = ring_dma;

        if (dev->mem_start) {
                option = dev->mem_start;
                duplex = (dev->mem_start & 16) ? 1 : 0;
        } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
                if (options[card_idx] >= 0)
                        option = options[card_idx];
                if (full_duplex[card_idx] >= 0)
                        duplex = full_duplex[card_idx];
        }

        spin_lock_init(&ep->lock);
        spin_lock_init(&ep->napi_lock);

        /* Bring the chip out of low-power mode. */
        ew32(GENCTL, 0x4200);
        /* Magic?!  If we don't set this bit the MII interface won't work. */
        /* This magic is documented in SMSC app note 7.15 */
        for (i = 16; i > 0; i--)
                ew32(TEST1, 0x0008);

        /* Turn on the MII transceiver. */
        ew32(MIICfg, 0x12);
        if (chip_idx == 1)
                ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
        ew32(GENCTL, 0x0200);

        /* Note: the '175 does not have a serial EEPROM. */
        for (i = 0; i < 3; i++)
                ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
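        /* The station address is read back 16 bits at a time: LAN0..LAN2 sit
         * at a 4-byte register stride, each holding one little-endian 16-bit
         * chunk of the 6-byte MAC address. */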

        if (debug > 2) {
                dev_dbg(&pdev->dev, "EEPROM contents:\n");
                for (i = 0; i < 64; i++)
                        pr_cont(" %4.4x%s", read_eeprom(ep, i),
                                i % 16 == 15 ? "\n" : "");
        }

        ep->pci_dev = pdev;
        ep->chip_id = chip_idx;
        ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
        ep->irq_mask =
                (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
                 | CntFull | TxUnderrun | EpicNapiEvent;

        /* Find the connected MII xcvrs.
           Doing this in open() would allow detecting external xcvrs later, but
           takes much time and no cards have external MII. */
        {
                int phy, phy_idx = 0;

                for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
                        int mii_status = mdio_read(dev, phy, MII_BMSR);

                        if (mii_status != 0xffff && mii_status != 0x0000) {
                                ep->phys[phy_idx++] = phy;
                                dev_info(&pdev->dev,
                                        "MII transceiver #%d control "
                                        "%4.4x status %4.4x.\n",
                                        phy, mdio_read(dev, phy, 0), mii_status);
                        }
                }
                ep->mii_phy_cnt = phy_idx;
                if (phy_idx != 0) {
                        phy = ep->phys[0];
                        ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
                        dev_info(&pdev->dev,
                                "Autonegotiation advertising %4.4x link "
                                "partner %4.4x.\n",
                                ep->mii.advertising, mdio_read(dev, phy, 5));
                } else if (!(ep->chip_flags & NO_MII)) {
                        dev_warn(&pdev->dev,
                                "***WARNING***: No MII transceiver found!\n");
                        /* Use the known PHY address of the EPII. */
                        ep->phys[0] = 3;
                }
                ep->mii.phy_id = ep->phys[0];
        }

        /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
        if (ep->chip_flags & MII_PWRDWN)
                ew32(NVCTL, er32(NVCTL) & ~0x483c);
        ew32(GENCTL, 0x0008);

        /* The lower four bits are the media type. */
        if (duplex) {
                ep->mii.force_media = ep->mii.full_duplex = 1;
                dev_info(&pdev->dev, "Forced full duplex requested.\n");
        }
        dev->if_port = ep->default_port = option;

        /* The Epic-specific entries in the device structure. */
        dev->netdev_ops = &epic_netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        netif_napi_add(dev, &ep->napi, epic_poll, 64);

        ret = register_netdev(dev);
        if (ret < 0)
                goto err_out_unmap_rx;

        netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
                    pci_id_tbl[chip_idx].name,
                    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
                    dev->dev_addr);

out:
        return ret;

err_out_unmap_rx:
        dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
                          ep->rx_ring_dma);
err_out_unmap_tx:
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
                          ep->tx_ring_dma);
err_out_iounmap:
        pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
        free_netdev(dev);
err_out_free_res:
        pci_release_regions(pdev);
err_out_disable:
        pci_disable_device(pdev);
        goto out;
}

/* Serial EEPROM section. */

/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x04            /* EEPROM shift clock. */
#define EE_CS           0x02            /* EEPROM chip select. */
#define EE_DATA_WRITE   0x08            /* EEPROM chip data in. */
#define EE_WRITE_0      0x01
#define EE_WRITE_1      0x09
#define EE_DATA_READ    0x10            /* EEPROM chip data out. */
#define EE_ENB          (0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()  er32(EECTL)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD    (5 << 6)
#define EE_READ64_CMD   (6 << 6)
#define EE_READ256_CMD  (6 << 8)
#define EE_ERASE_CMD    (7 << 6)
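
/* read_eeprom() below bit-bangs a 93C46-style serial protocol through
 * EECTL: it shifts out the read opcode and word address (MSB first), then
 * clocks 16 data bits back in.  The command width is chosen at run time:
 * an EECTL size bit (0x40) distinguishes 64-word parts (6 address bits)
 * from 256-word parts (8 address bits). */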

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
        void __iomem *ioaddr = ep->ioaddr;

        ew32(INTMASK, 0x00000000);
}

static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
        er32(INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
                                     struct epic_private *ep)
{
        void __iomem *ioaddr = ep->ioaddr;

        ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
        __epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
                                    struct epic_private *ep)
{
        void __iomem *ioaddr = ep->ioaddr;

        /* No need to commit possible posted write */
        ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}

static int read_eeprom(struct epic_private *ep, int location)
{
        void __iomem *ioaddr = ep->ioaddr;
        int i;
        int retval = 0;
        int read_cmd = location |
                (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

        ew32(EECTL, EE_ENB & ~EE_CS);
        ew32(EECTL, EE_ENB);

        /* Shift the read command bits out. */
        for (i = 12; i >= 0; i--) {
                short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;

                ew32(EECTL, EE_ENB | dataval);
                eeprom_delay();
                ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
                eeprom_delay();
        }
        ew32(EECTL, EE_ENB);

        for (i = 16; i > 0; i--) {
                ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
                eeprom_delay();
                retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
                ew32(EECTL, EE_ENB);
                eeprom_delay();
        }

        /* Terminate the EEPROM access. */
        ew32(EECTL, EE_ENB & ~EE_CS);
        return retval;
}

#define MII_READOP      1
#define MII_WRITEOP     2
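
/* MII management interface: a transaction is started by writing the PHY
 * address, register number and one of these opcode bits into MIICtrl; the
 * chip clears the opcode bit when the cycle completes, so the helpers
 * below simply poll for that. */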
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
        int i;

        ew32(MIICtrl, read_cmd);
        /* Typical operation takes 25 loops. */
        for (i = 400; i > 0; i--) {
                barrier();
                if ((er32(MIICtrl) & MII_READOP) == 0) {
                        /* Work around read failure bug. */
                        if (phy_id == 1 && location < 6 &&
                            er16(MIIData) == 0xffff) {
                                ew32(MIICtrl, read_cmd);
                                continue;
                        }
                        return er16(MIIData);
                }
        }
        return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        int i;

        ew16(MIIData, value);
        ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
        for (i = 10000; i > 0; i--) {
                barrier();
                if ((er32(MIICtrl) & MII_WRITEOP) == 0)
                        break;
        }
}


static int epic_open(struct net_device *dev)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        const int irq = ep->pci_dev->irq;
        int rc, i;

        /* Soft reset the chip. */
        ew32(GENCTL, 0x4001);

        napi_enable(&ep->napi);
        rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc) {
                napi_disable(&ep->napi);
                return rc;
        }

        epic_init_ring(dev);

        ew32(GENCTL, 0x4000);
        /* This magic is documented in SMSC app note 7.15 */
        for (i = 16; i > 0; i--)
                ew32(TEST1, 0x0008);

        /* Pull the chip out of low-power mode, enable interrupts, and set for
           PCI read multiple.  The MIIcfg setting and strange write order are
           required by the details of which bits are reset and the transceiver
           wiring on the Ositech CardBus card.
        */
#if 0
        ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
        if (ep->chip_flags & MII_PWRDWN)
                ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

        /* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
        ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
        er32(GENCTL);
        ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
        ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
        er32(GENCTL);
        ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

        udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

        for (i = 0; i < 3; i++)
                ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

        ep->tx_threshold = TX_FIFO_THRESH;
        ew32(TxThresh, ep->tx_threshold);

        if (media2miictl[dev->if_port & 15]) {
                if (ep->mii_phy_cnt)
                        mdio_write(dev, ep->phys[0], MII_BMCR,
                                   media2miictl[dev->if_port & 15]);
                if (dev->if_port == 1) {
                        if (debug > 1)
                                netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
                                            mdio_read(dev, ep->phys[0], MII_BMSR));
                }
        } else {
                int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);

                if (mii_lpa != 0xffff) {
                        if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
                                ep->mii.full_duplex = 1;
                        else if (!(mii_lpa & LPA_LPACK))
                                mdio_write(dev, ep->phys[0], MII_BMCR,
                                           BMCR_ANENABLE | BMCR_ANRESTART);
                        if (debug > 1)
                                netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
                                            ep->mii.full_duplex ? "full"
                                                                : "half",
                                            ep->phys[0], mii_lpa);
                }
        }

        ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
        ew32(PRxCDAR, ep->rx_ring_dma);
        ew32(PTxCDAR, ep->tx_ring_dma);

        /* Start the chip's Rx process. */
        set_rx_mode(dev);
        ew32(COMMAND, StartRx | RxQueued);

        netif_start_queue(dev);

        /* Enable interrupts by setting the interrupt mask. */
        ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
             ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
             TxUnderrun);

        if (debug > 1) {
                netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
                           ioaddr, irq, er32(GENCTL),
                           ep->mii.full_duplex ? "full" : "half");
        }

        /* Set the timer to switch to check for link beat and perhaps switch
           to an alternate media type. */
        timer_setup(&ep->timer, epic_timer, 0);
        ep->timer.expires = jiffies + 3*HZ;
        add_timer(&ep->timer);

        return rc;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
        struct net_device_stats *stats = &dev->stats;
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;

        netif_stop_queue(dev);

        /* Disable interrupts by clearing the interrupt mask. */
        ew32(INTMASK, 0x00000000);
        /* Stop the chip's Tx and Rx DMA processes. */
        ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

        /* Update the error counts. */
        if (er16(COMMAND) != 0xffff) {
                stats->rx_missed_errors += er8(MPCNT);
                stats->rx_frame_errors += er8(ALICNT);
                stats->rx_crc_errors += er8(CRCCNT);
        }

        /* Remove the packets on the Rx queue. */
        epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
        int i;

        /* Soft reset the chip. */
        ew32(GENCTL, 0x4001);

        netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
                   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
        udelay(1);

        /* This magic is documented in SMSC app note 7.15 */
        for (i = 16; i > 0; i--)
                ew32(TEST1, 0x0008);

#ifdef __BIG_ENDIAN
        ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
        ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
        ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
        if (ep->chip_flags & MII_PWRDWN)
                ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

        for (i = 0; i < 3; i++)
                ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

        ep->tx_threshold = TX_FIFO_THRESH;
        ew32(TxThresh, ep->tx_threshold);
        ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
        ew32(PRxCDAR, ep->rx_ring_dma +
             (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
        ew32(PTxCDAR, ep->tx_ring_dma +
             (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

        /* Start the chip's Rx process. */
        set_rx_mode(dev);
        ew32(COMMAND, StartRx | RxQueued);

        /* Enable interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) TxUnderrun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) er32(COMMAND), er32(GENCTL), er32(INTSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) static void check_media(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int negotiated = mii_lpa & ep->mii.advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (ep->mii.force_media)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (mii_lpa == 0xffff) /* Bogus read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (ep->mii.full_duplex != duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ep->mii.full_duplex = duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ep->mii.full_duplex ? "full" : "half",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ep->phys[0], mii_lpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static void epic_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct epic_private *ep = from_timer(ep, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct net_device *dev = ep->mii.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int next_tick = 5*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (debug > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) er32(TxSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) check_media(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ep->timer.expires = jiffies + next_tick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) add_timer(&ep->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (debug > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) er16(TxSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (debug > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ep->dirty_tx, ep->cur_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ew32(COMMAND, RestartTx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) epic_restart(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ew32(COMMAND, TxQueued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (!ep->tx_full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static void epic_init_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ep->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ep->dirty_tx = ep->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ep->cur_rx = ep->dirty_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* Initialize all Rx descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ep->rx_ring[i].rxstatus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) ep->rx_ring[i].buflength = ep->rx_buf_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ep->rx_ring[i].next = ep->rx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) (i+1)*sizeof(struct epic_rx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) ep->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Mark the last entry as wrapping the ring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ep->rx_ring[i-1].next = ep->rx_ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* Fill in the Rx buffers. Handle allocation failure gracefully. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ep->rx_skbuff[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) skb_reserve(skb, 2); /* 16 byte align the IP header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ep->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ep->rx_ring[i].rxstatus = DescOwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* The Tx buffer descriptor is filled in as needed, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) do need to clear the ownership bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ep->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ep->tx_ring[i].txstatus = 0x0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ep->tx_ring[i].next = ep->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) (i+1)*sizeof(struct epic_tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ep->tx_ring[i-1].next = ep->tx_ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int entry, free_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) u32 ctrl_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (skb_padto(skb, ETH_ZLEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Caution: the write order is important here, set the field with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) "ownership" bit last. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Calculate the next Tx descriptor entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) spin_lock_irqsave(&ep->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) free_count = ep->cur_tx - ep->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) entry = ep->cur_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ep->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) skb->data, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ctrl_word = 0x100000; /* No interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) } else if (free_count == TX_QUEUE_LEN/2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ctrl_word = 0x140000; /* Tx-done intr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) } else if (free_count < TX_QUEUE_LEN - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ctrl_word = 0x100000; /* No Tx-done intr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /* Leave room for an additional entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ctrl_word = 0x140000; /* Tx-done intr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ep->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ep->tx_ring[entry].buflength = ctrl_word | skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ep->tx_ring[entry].txstatus =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) | DescOwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ep->cur_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ep->tx_full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) spin_unlock_irqrestore(&ep->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /* Trigger an immediate transmit demand. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ew32(COMMAND, TxQueued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) skb->len, entry, ctrl_word, er32(TxSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct net_device_stats *stats = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* There was an major error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) stats->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (status & 0x1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) stats->tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (status & 0x0008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) stats->tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (status & 0x0040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) stats->tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (status & 0x0010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) stats->tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static void epic_tx(struct net_device *dev, struct epic_private *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) unsigned int dirty_tx, cur_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * Note: if this lock becomes a problem we can narrow the locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * region at the cost of occasionally grabbing the lock more times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) cur_tx = ep->cur_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int entry = dirty_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int txstatus = ep->tx_ring[entry].txstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (txstatus & DescOwn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) break; /* It still hasn't been Txed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (likely(txstatus & 0x0001)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dev->stats.collisions += (txstatus >> 8) & 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) epic_tx_error(dev, ep, txstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Free the original skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) skb = ep->tx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) dma_unmap_single(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ep->tx_ring[entry].bufaddr, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ep->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (cur_tx - dirty_tx > TX_RING_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) dirty_tx, cur_tx, ep->tx_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) dirty_tx += TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) ep->dirty_tx = dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* The ring is no longer full, allow new TX entries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ep->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* The interrupt handler does all of the Rx thread work and cleans up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) after the Tx thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static irqreturn_t epic_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct net_device *dev = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) status = er32(INTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* Acknowledge all of the current interrupt sources ASAP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ew32(INTSTAT, status & EpicNormalEvent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (debug > 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) status, er32(INTSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if ((status & IntrSummary) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (status & EpicNapiEvent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) spin_lock(&ep->napi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (napi_schedule_prep(&ep->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) epic_napi_irq_off(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) __napi_schedule(&ep->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) spin_unlock(&ep->napi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) status &= ~EpicNapiEvent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /* Check uncommon events all at once. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) struct net_device_stats *stats = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (status == EpicRemoved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Always update the error counts to avoid overhead later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) stats->rx_missed_errors += er8(MPCNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) stats->rx_frame_errors += er8(ALICNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) stats->rx_crc_errors += er8(CRCCNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (status & TxUnderrun) { /* Tx FIFO underflow. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) stats->tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ew32(TxThresh, ep->tx_threshold += 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Restart the transmit process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ew32(COMMAND, RestartTx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (status & PCIBusErr170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) epic_pause(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) epic_restart(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* Clear all error sources. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ew32(INTSTAT, status & 0x7f18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (debug > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static int epic_rx(struct net_device *dev, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int entry = ep->cur_rx % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ep->rx_ring[entry].rxstatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (rx_work_limit > budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) rx_work_limit = budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* If we own the next entry, it's a new packet. Send it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) int status = ep->rx_ring[entry].rxstatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) netdev_dbg(dev, " epic_rx() status was %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (--rx_work_limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (status & 0x2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (debug > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (status & 0x2000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) } else if (status & 0x0006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* Rx Frame errors are counted in hardware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* Malloc up new buffer, compatible with net-2e. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Omit the four octet CRC from the length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) short pkt_len = (status >> 16) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (pkt_len > PKT_BUF_SZ - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) status, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) pkt_len = 1514;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) /* Check if the packet is long enough to accept without copying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) to a minimally-sized skbuff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (pkt_len < rx_copybreak &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) skb_reserve(skb, 2); /* 16 byte align the IP header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) dma_sync_single_for_cpu(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) ep->rx_ring[entry].bufaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ep->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dma_sync_single_for_device(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) ep->rx_ring[entry].bufaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) ep->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) dma_unmap_single(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) ep->rx_ring[entry].bufaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ep->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) skb_put(skb = ep->rx_skbuff[entry], pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ep->rx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) work_done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) entry = (++ep->cur_rx) % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* Refill the Rx ring buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) entry = ep->dirty_rx % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (ep->rx_skbuff[entry] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ep->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) work_done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* AV: shouldn't we add a barrier here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ep->rx_ring[entry].rxstatus = DescOwn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) status = er32(INTSTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (status == EpicRemoved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (status & RxOverflow) /* Missed a Rx frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (status & (RxOverflow | RxFull))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ew16(COMMAND, RxQueued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) static int epic_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct epic_private *ep = container_of(napi, struct epic_private, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct net_device *dev = ep->mii.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) epic_tx(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) work_done = epic_rx(dev, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) epic_rx_err(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
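	/* Everything was handled within budget: complete NAPI and re-enable
	   the chip's NAPI interrupt sources, serialized by napi_lock. */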
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (work_done < budget && napi_complete_done(napi, work_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) spin_lock_irqsave(&ep->napi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ew32(INTSTAT, EpicNapiEvent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) epic_napi_irq_on(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) spin_unlock_irqrestore(&ep->napi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static int epic_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct pci_dev *pdev = ep->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) napi_disable(&ep->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) er32(INTSTAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) del_timer_sync(&ep->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) epic_disable_int(dev, ep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) free_irq(pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) epic_pause(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* Free all the skbuffs in the Rx queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) skb = ep->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ep->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ep->rx_ring[i].buflength = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ep->rx_buf_sz, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) skb = ep->tx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) ep->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /* Green! Leave the chip in low-power mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ew32(GENCTL, 0x0008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static struct net_device_stats *epic_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct net_device_stats *stats = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) stats->rx_missed_errors += er8(MPCNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) stats->rx_frame_errors += er8(ALICNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) stats->rx_crc_errors += er8(CRCCNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /* Set or clear the multicast filter for this adaptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) Note that we only use exclusion around actually queueing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) new frame, not around filling ep->setup_frame. This is non-deterministic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) when re-entered but still correct. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static void set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) unsigned char mc_filter[8]; /* Multicast hash filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) ew32(RxCtrl, 0x002c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /* Unconditionally log net taps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) memset(mc_filter, 0xff, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /* There is apparently a chip bug, so the multicast filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) is never enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /* Too many to filter perfectly -- accept all multicasts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) memset(mc_filter, 0xff, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) ew32(RxCtrl, 0x000c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) } else if (netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) ew32(RxCtrl, 0x0004);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) } else { /* Never executed, for now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) memset(mc_filter, 0, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) unsigned int bit_nr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) mc_filter[bit_nr >> 3] |= (1 << bit_nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* ToDo: perhaps we need to stop the Tx and Rx process here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct epic_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) strlcpy(info->version, DRV_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static int netdev_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct epic_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) mii_ethtool_get_link_ksettings(&np->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static int netdev_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct epic_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static int netdev_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct epic_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return mii_nway_restart(&np->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static u32 netdev_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) struct epic_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return mii_link_ok(&np->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static u32 netdev_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) static void netdev_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) debug = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static int ethtool_begin(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (ep->ethtool_ops_nesting == U32_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /* power-up, if interface is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) ew32(GENCTL, 0x0200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static void ethtool_complete(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /* power-down, if interface is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) ew32(GENCTL, 0x0008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static const struct ethtool_ops netdev_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) .get_drvinfo = netdev_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) .nway_reset = netdev_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) .get_link = netdev_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) .get_msglevel = netdev_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) .set_msglevel = netdev_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) .begin = ethtool_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) .complete = ethtool_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) .get_link_ksettings = netdev_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) .set_link_ksettings = netdev_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct epic_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct mii_ioctl_data *data = if_mii(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /* power-up, if interface is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (! netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ew32(GENCTL, 0x0200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* power-down, if interface is down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (! netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ew32(GENCTL, 0x0008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static void epic_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) ep->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ep->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) pci_iounmap(pdev, ep->ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /* pci_power_off(pdev, -1); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static int __maybe_unused epic_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct epic_private *ep = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) void __iomem *ioaddr = ep->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) epic_pause(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) /* Put the chip into low-power mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ew32(GENCTL, 0x0008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /* pci_power_off(pdev, -1); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) static int __maybe_unused epic_resume(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) epic_restart(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /* pci_power_on(pdev); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) static struct pci_driver epic_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) .id_table = epic_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) .probe = epic_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) .remove = epic_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) .driver.pm = &epic_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static int __init epic_init (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) /* when a module, this is printed whether or not devices are found in probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) pr_info("%s%s\n", version, version2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return pci_register_driver(&epic_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static void __exit epic_cleanup (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) pci_unregister_driver (&epic_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) module_init(epic_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) module_exit(epic_cleanup);