/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
	Written 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
	It also supports the Symbios Logic version of the same chip core.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/yellowfin.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"yellowfin"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"Sep 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static int fifo_cfg = 0x0020;		/* Bypass external Tx FIFO. */
#elif defined(YF_NEW)			/* A future perfect board :->. */
static int dma_ctrl = 0x00CAC277;	/* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263;	/* Constrained by errata */
static const int fifo_cfg = 0x0020;	/* Bypass external Tx FIFO. */
#endif

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   No media types are currently defined. These exist for driver
   interoperability.
*/
#define MAX_UNITS 8				/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for efficiency.
   Making the Tx ring too long decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE	64
#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
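/* For reference (assuming no structure padding, so the four-__le32
   yellowfin_desc defined below is 16 bytes and the four-u16
   tx_status_words is 8 bytes), the defaults above work out to:
	TX_TOTAL_SIZE     = 2*16*16 =  512 bytes,
	RX_TOTAL_SIZE     =   64*16 = 1024 bytes,
	STATUS_TOTAL_SIZE =    16*8 =  128 bytes. */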

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)
#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer. */

#define yellowfin_debug debug

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>

/* These identify the driver base version and may not be removed. */
static const char version[] =
	KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
	" (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 9: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
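
/* Illustrative module load using the parameters declared above (the
   values shown are examples only, not recommendations):

	modprobe yellowfin debug=2 rx_copybreak=200 full_duplex=1,1
*/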

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip. This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
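
As an illustration (a minimal sketch of the linking actually performed in
yellowfin_init_ring() below), each descriptor's branch_addr points to the
next descriptor, and the final entry points back to entry 0:

	for (i = 0; i < RX_RING_SIZE; i++)
		rx_ring[i].branch_addr = cpu_to_le32(rx_ring_dma +
			((i+1) % RX_RING_SIZE)*sizeof(struct yellowfin_desc));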

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information). For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
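
A minimal sketch of that receive-path decision (yellowfin_rx() itself is
not part of this excerpt; names are abbreviated):

	if (pkt_len < rx_copybreak) {
		/* Tiny frame: copy it into a fresh small skb and leave
		   the full-sized buffer in the ring. */
		skb = netdev_alloc_skb(dev, pkt_len + 2);
		skb_reserve(skb, 2);	/* 16 byte align the IP header */
		skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
	} else {
		/* Big frame: pass the ring skb up the stack and refill
		   the ring slot with a newly allocated buffer. */
		skb = rx_skb;
		rx_skbuff[entry] = NULL;
	}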

IIIC. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'yp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
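
Sketched as pseudo-code (a minimal sketch; note that the tbusy flag
described above predates the netif_stop_queue()/netif_wake_queue()
interface the code now uses):

	start_xmit():
		queue skb at tx_ring[cur_tx++];
		if (cur_tx - dirty_tx >= TX_QUEUE_SIZE)
			tx_full = 1;		/* stop taking packets */

	interrupt handler:
		for each completed Tx status
			dirty_tx++;		/* slot is free again */
		if (tx_full && cur_tx - dirty_tx < TX_QUEUE_SIZE)
			tx_full = 0;		/* wake the queue */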

IV. Notes

Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!

IVb. References

Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html

IVc. Errata

See Packet Engines confidential appendix (prototype chips only).
*/


enum capability_flags {
	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
	HasMACAddrBug=32, /* Only on early revs. */
	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};

/* The PCI I/O space extent. */
enum {
	YELLOWFIN_SIZE	= 0x100,
};

struct pci_id_info {
	const char *name;
	struct match_info {
		int pci, pci_mask, subsystem, subsystem_mask;
		int revision, revision_mask;	/* Only 8 bits. */
	} id;
	int drv_flags;			/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
	 HasMII | DontUseEeprom },
	{ }
};

static const struct pci_device_id yellowfin_pci_tbl[] = {
	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);


/* Offsets to the Yellowfin registers. Various sizes and alignments. */
enum yellowfin_offsets {
	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
	MII_Status=0xAE,
	RxDepth=0xB8, FlowCtrl=0xBC,
	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
	EEFeature=0xF5,
};

/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability. */
struct yellowfin_desc {
	__le32 dbdma_cmd;
	__le32 addr;
	__le32 branch_addr;
	__le32 result_status;
};

struct tx_status_words {
#ifdef __BIG_ENDIAN
	u16 tx_errs;
	u16 tx_cnt;
	u16 paused;
	u16 total_tx_cnt;
#else  /* Little endian chips. */
	u16 tx_cnt;
	u16 tx_errs;
	u16 total_tx_cnt;
	u16 paused;
#endif /* __BIG_ENDIAN */
};

/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
	BRANCH_IFTRUE=0x040000,
};

/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04, IntrRxPCIErr=0x08,
	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40, IntrTxPCIErr=0x80,
	IntrEarlyRx=0x100, IntrWakeup=0x200, };

#define PRIV_ALIGN	31	/* Required alignment mask */
#define MII_CNT		4
struct yellowfin_private {
	/* Descriptor rings first for alignment.
	   Tx requires a second descriptor for status. */
	struct yellowfin_desc *rx_ring;
	struct yellowfin_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	struct tx_status_words *tx_status;
	dma_addr_t tx_status_dma;

	struct timer_list timer;	/* Media selection timer. */
	/* Frequently used and paired value: keep adjacent for cache effect. */
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct tx_status_words *tx_tail_desc;
	unsigned int cur_tx, dirty_tx;
	int tx_threshold;
	unsigned int tx_full:1;		/* The Tx queue is full. */
	unsigned int full_duplex:1;	/* Full-duplex operation requested. */
	unsigned int duplex_lock:1;
	unsigned int medialock:1;	/* Do not sense media. */
	unsigned int default_port:4;	/* Last dev->if_port value. */
	/* MII transceiver section. */
	int mii_cnt;			/* MII device addresses. */
	u16 advertising;		/* NWay media advertisement */
	unsigned char phys[MII_CNT];	/* MII device addresses, only first one used */
	spinlock_t lock;
	void __iomem *base;
};

static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(struct timer_list *t);
static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static const struct net_device_ops netdev_ops = {
	.ndo_open		= yellowfin_open,
	.ndo_stop		= yellowfin_close,
	.ndo_start_xmit		= yellowfin_start_xmit,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= yellowfin_tx_timeout,
};

static int yellowfin_init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct yellowfin_private *np;
	int irq;
	int chip_idx = ent->driver_data;
	static int find_cnt;
	void __iomem *ioaddr;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pci_enable_device(pdev);
	if (i) return i;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_free_netdev;

	pci_set_master (pdev);

	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
	if (!ioaddr)
		goto err_out_free_res;

	irq = pdev->irq;

	if (drv_flags & DontUseEeprom)
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
	else {
		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
	}

	/* Reset the chip. */
	iowrite32(0x80000000, ioaddr + DMACtrl);

	pci_set_drvdata(pdev, dev);
	spin_lock_init(&np->lock);

	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = drv_flags;
	np->base = ioaddr;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_rx;
	np->tx_status = ring_space;
	np->tx_status_dma = ring_dma;

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->full_duplex = 1;
		np->default_port = option & 15;
		if (np->default_port)
			np->medialock = 1;
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->full_duplex = 1;

	if (np->full_duplex)
		np->duplex_lock = 1;

	/* The Yellowfin-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	if (mtu)
		dev->mtu = mtu;

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_status;

	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
		    pci_id_tbl[chip_idx].name,
		    ioread32(ioaddr + ChipRev), ioaddr,
		    dev->dev_addr, irq);

	if (np->drv_flags & HasMII) {
		int phy, phy_idx = 0;
		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(ioaddr, phy, 1);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->advertising = mdio_read(ioaddr, phy, 4);
				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
					    phy, mii_status, np->advertising);
			}
		}
		np->mii_cnt = phy_idx;
	}

	find_cnt++;

	return 0;

err_out_unmap_status:
	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
			  np->tx_status_dma);
err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int read_eeprom(void __iomem *ioaddr, int location)
{
	int bogus_cnt = 10000;		/* Typical 33MHz: 1050 ticks */

	iowrite8(location, ioaddr + EEAddr);
	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
	while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
		;
	return ioread8(ioaddr + EERead);
}

/* MII Management Data I/O accesses.
   These routines assume the MDIO controller is idle, and do not exit until
   the command is finished. */

static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
	int i;

	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
	iowrite16(1, ioaddr + MII_Cmd);
	for (i = 10000; i >= 0; i--)
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;
	return ioread16(ioaddr + MII_Rd_Data);
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
	int i;

	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
	iowrite16(value, ioaddr + MII_Wr_Data);

	/* Wait for the command to finish. */
	for (i = 10000; i >= 0; i--)
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;
}


static int yellowfin_open(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	const int irq = yp->pci_dev->irq;
	void __iomem *ioaddr = yp->base;
	int i, rc;

	/* Reset the chip. */
	iowrite32(0x80000000, ioaddr + DMACtrl);

	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;

	rc = yellowfin_init_ring(dev);
	if (rc < 0)
		goto err_free_irq;

	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);

	/* Set up various condition 'select' registers.
	   There are no options here. */
	iowrite32(0x00800080, ioaddr + TxIntrSel);	/* Interrupt on Tx abort */
	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
	iowrite32(0x00400040, ioaddr + TxWaitSel);	/* Wait on Tx status */
	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */

	/* Initialize other registers: with so many registers, this will
	   eventually be converted to an offset/value list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) iowrite32(dma_ctrl, ioaddr + DMACtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) iowrite16(fifo_cfg, ioaddr + FIFOcfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) /* Enable automatic generation of flow control frames, period 0xffff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) yp->tx_threshold = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) if (dev->if_port == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) dev->if_port = yp->default_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) /* Setting the Rx mode will start the Rx process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (yp->drv_flags & IsGigabit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) /* We are always in full-duplex mode with gigabit! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) yp->full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) iowrite16(0x01CF, ioaddr + Cnfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) iowrite16(0x1018, ioaddr + FrameGap1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) set_rx_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /* Enable interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) iowrite32(0x80008000, ioaddr + TxCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) if (yellowfin_debug > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) /* Set the timer to check for link beat. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) timer_setup(&yp->timer, yellowfin_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) yp->timer.expires = jiffies + 3*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) add_timer(&yp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) free_irq(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) static void yellowfin_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) struct yellowfin_private *yp = from_timer(yp, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct net_device *dev = pci_get_drvdata(yp->pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) void __iomem *ioaddr = yp->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) int next_tick = 60*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) if (yellowfin_debug > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) ioread16(ioaddr + IntrStatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (yp->mii_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) int negotiated = lpa & yp->advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (yellowfin_debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) yp->phys[0], bmsr, lpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (bmsr & BMSR_LSTATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) next_tick = 60*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) next_tick = 3*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) yp->timer.expires = jiffies + next_tick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) add_timer(&yp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) struct yellowfin_private *yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) void __iomem *ioaddr = yp->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) yp->cur_tx, yp->dirty_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) ioread32(ioaddr + TxStatus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) ioread32(ioaddr + RxStatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /* Note: these should be KERN_DEBUG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (yellowfin_debug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) pr_warn(" Rx ring %p: ", yp->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) for (i = 0; i < RX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) pr_cont(" %08x", yp->rx_ring[i].result_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) pr_warn(" Tx ring %p: ", yp->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) for (i = 0; i < TX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) pr_cont(" %04x /%08x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) yp->tx_status[i].tx_errs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) yp->tx_ring[i].result_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /* If the hardware is found to hang regularly, we will update the code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) to reinitialize the chip here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) dev->if_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /* Wake the potentially-idle transmit channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) iowrite32(0x10001000, yp->base + TxCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) netif_wake_queue (dev); /* Typical path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) static int yellowfin_init_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct yellowfin_private *yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) yp->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) yp->cur_rx = yp->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) yp->dirty_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) yp->rx_ring[i].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) yp->rx_skbuff[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) skb_reserve(skb, 2); /* 16 byte align the IP header. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) yp->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) DMA_FROM_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (i != RX_RING_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) dev_kfree_skb(yp->rx_skbuff[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
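	/* Equals 0 here (the early return above guarantees a full ring); the
	   offset form keeps cur_rx - dirty_rx equal to the number of slots
	   the hardware currently owns. */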
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) #define NO_TXSTATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) #ifdef NO_TXSTATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* In this mode the Tx ring needs only a single descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) yp->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) /* Wrap ring */
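	/* The final entry's branch_addr already points back at entry 0 (set
	   by the modulo in the loop above); BRANCH_ALWAYS simply makes that
	   branch unconditional, closing the ring. */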
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* Tx ring needs a pair of descriptors, the second for the status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) j = 2*i;
		yp->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /* Branch on Tx error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) (j+1)*sizeof(struct yellowfin_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (yp->flags & FullTxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) yp->tx_ring[j].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) i*sizeof(struct tx_status_words));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /* Symbios chips write only tx_errs word. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) yp->tx_ring[j].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) yp->tx_ring[j].request_cnt = 2;
			/* Point at the tx_errs word within this entry's
			   status block. */
			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
				i*sizeof(struct tx_status_words) +
				offsetof(struct tx_status_words, tx_errs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* Wrap ring */
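	/* j is left indexing the final status descriptor (2*TX_RING_SIZE - 1);
	   BRANCH_ALWAYS turns its branch_addr, already aimed at entry 0 by
	   the loop above, into an unconditional jump that closes the ring. */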
	yp->tx_ring[j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) yp->tx_tail_desc = &yp->tx_status[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct yellowfin_private *yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) unsigned entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) int len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Note: Ordering is important here, set the field with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) "ownership" bit last, and only then increment cur_tx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) /* Calculate the next Tx descriptor entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) entry = yp->cur_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* Fix GX chipset errata. */
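		/* The errata evidently bites when a frame ends in the last
		   few bytes of a 32-byte cache line (end offset 25-31) or
		   exactly on a line boundary; pad so the frame ends just
		   past a boundary instead. */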
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (cacheline_end > 24 || cacheline_end == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) len = skb->len + 32 - cacheline_end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (skb_padto(skb, len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) yp->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) yp->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) #ifdef NO_TXSTATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) len, DMA_TO_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) yp->tx_ring[entry].result_status = 0;
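	/* Per the ordering note above: write the fresh CMD_STOP terminator
	   for the next slot before arming this descriptor's command, so the
	   DMA engine can never run past the queued work. */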
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (entry >= TX_RING_SIZE-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /* New stop command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) yp->tx_ring[entry].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) yp->cur_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) yp->tx_ring[entry<<1].request_cnt = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) len, DMA_TO_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* The input_last (status-write) command is constant, but we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) rewrite the subsequent 'stop' command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) yp->cur_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Final step -- overwrite the old 'stop' command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
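	/* Request a Tx-done interrupt only on every sixth packet to bound
	   the completion-interrupt rate. */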
	yp->tx_ring[entry<<1].dbdma_cmd =
		cpu_to_le32(((entry % 6) == 0 ?
			     CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
			     CMD_TX_PKT | BRANCH_IFTRUE) | len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Non-x86 Todo: explicitly flush cache lines here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /* Wake the potentially-idle transmit channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) iowrite32(0x10001000, yp->base + TxCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) netif_start_queue (dev); /* Typical path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) yp->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (yellowfin_debug > 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) yp->cur_tx, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* The interrupt handler does all of the Rx thread work and cleans up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) after the Tx thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct net_device *dev = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct yellowfin_private *yp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) void __iomem *ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) int boguscnt = max_interrupt_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ioaddr = yp->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) spin_lock (&yp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) do {
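		/* Reading IntrClear evidently returns the pending interrupt
		   sources and acknowledges them in the same access, hence
		   one read per pass of this loop. */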
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) u16 intr_status = ioread16(ioaddr + IntrClear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (yellowfin_debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (intr_status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (intr_status & (IntrRxDone | IntrEarlyRx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) yellowfin_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #ifdef NO_TXSTATS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) int entry = yp->dirty_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
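			/* result_status was zeroed when the packet was
			   queued; the chip writes it back non-zero on
			   completion, so zero means still in flight. */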
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (yp->tx_ring[entry].result_status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) skb = yp->tx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* Free the original skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dma_unmap_single(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) le32_to_cpu(yp->tx_ring[entry].addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) yp->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (yp->tx_full &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* The ring is no longer full, clear tbusy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) yp->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) unsigned dirty_tx = yp->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dirty_tx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* Todo: optimize this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int entry = dirty_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) u16 tx_errs = yp->tx_status[entry].tx_errs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (yellowfin_debug > 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) yp->tx_status[entry].tx_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) yp->tx_status[entry].tx_errs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) yp->tx_status[entry].total_tx_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) yp->tx_status[entry].paused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (tx_errs == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) break; /* It still hasn't been Txed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) skb = yp->tx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (tx_errs & 0xF810) {
				/* There was a major error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (yellowfin_debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) tx_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (yellowfin_debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) tx_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) dev->stats.collisions += tx_errs & 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* Free the original skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dma_unmap_single(&yp->pci_dev->dev,
					 le32_to_cpu(yp->tx_ring[entry << 1].addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) dev_consume_skb_irq(skb);
				yp->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Mark status as empty. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) yp->tx_status[entry].tx_errs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) dirty_tx, yp->cur_tx, yp->tx_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dirty_tx += TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (yp->tx_full &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* The ring is no longer full, clear tbusy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) yp->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) yp->dirty_tx = dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /* Log errors and other uncommon events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (intr_status & 0x2ee) /* Abnormal error summary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) yellowfin_error(dev, intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (--boguscnt < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (yellowfin_debug > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ioread16(ioaddr + IntrStatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) spin_unlock (&yp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* This routine is logically part of the interrupt handler, but separated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) for clarity and better register allocation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static int yellowfin_rx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct yellowfin_private *yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) int entry = yp->cur_rx % RX_RING_SIZE;
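	/* Upper bound on packets to process: the number of ring slots
	   currently owned by the hardware. */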
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (yellowfin_debug > 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) entry, yp->rx_ring[entry].result_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) yp->rx_ring[entry].result_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* If EOP is set on the next entry, it's a new packet. Send it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct yellowfin_desc *desc = &yp->rx_ring[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct sk_buff *rx_skb = yp->rx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) s16 frame_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) u16 desc_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int data_size, __maybe_unused yf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) u8 *buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
		if (!desc->result_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) dma_sync_single_for_cpu(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) le32_to_cpu(desc->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) yp->rx_buf_sz, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) desc_status = le32_to_cpu(desc->result_status) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) buf_addr = rx_skb->data;
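		/* The low 16 bits of dbdma_cmd hold the requested buffer
		   length, and the low 16 bits of the written-back
		   result_status appear to hold the residual count; the
		   difference is the number of bytes actually stored. */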
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) data_size = (le32_to_cpu(desc->dbdma_cmd) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) le32_to_cpu(desc->result_status)) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (yellowfin_debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) printk(KERN_DEBUG " %s() status was %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) __func__, frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (--boguscnt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) yf_size = sizeof(struct yellowfin_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
		if (!(desc_status & RX_EOP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (data_size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) desc_status, data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (yellowfin_debug > 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) printk(KERN_DEBUG " %s() Rx error was %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) __func__, frame_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (frame_status & 0x0060) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (frame_status < 0) dev->stats.rx_dropped++;
		} else if (!(yp->drv_flags & IsGigabit) &&
			   ((buf_addr[data_size-1] & 0x85) ||
			    (buf_addr[data_size-2] & 0xC0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) u8 status1 = buf_addr[data_size-2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) u8 status2 = buf_addr[data_size-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (status1 & 0xC0) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (status2 & 0x03) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (status2 & 0x04) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (status2 & 0x80) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
		} else if ((yp->flags & HasMACAddrBug) &&
			   !ether_addr_equal(buf_addr, dev->dev_addr) &&
			   !is_broadcast_ether_addr(buf_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (bogus_rx++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) netdev_warn(dev, "Bad frame to %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) buf_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int pkt_len = data_size -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
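			/* The quantity stripped here is the status trailer
			   the chip appends to each frame; its length
			   evidently differs between the two chip cores
			   (7 bytes vs. 8 plus a stored pad count). */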
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* To verify: Yellowfin Length should omit the CRC! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (yellowfin_debug > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) __func__, pkt_len, data_size, boguscnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) /* Check if the packet is long enough to just pass up the skbuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) without copying to a properly sized skbuff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (pkt_len > rx_copybreak) {
				skb = rx_skb;
				skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dma_unmap_single(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) le32_to_cpu(yp->rx_ring[entry].addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) yp->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) yp->rx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) skb = netdev_alloc_skb(dev, pkt_len + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) skb_reserve(skb, 2); /* 16 byte align the IP header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) dma_sync_single_for_device(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) le32_to_cpu(desc->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) yp->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) entry = (++yp->cur_rx) % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /* Refill the Rx ring buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) entry = yp->dirty_rx % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (yp->rx_skbuff[entry] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) break; /* Better luck next round. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) yp->rx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) yp->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) DMA_FROM_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
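		/* Same terminator dance as on Tx: leave the just-refilled
		   entry as CMD_STOP, then re-arm the previous entry (the
		   ring end wraps via BRANCH_ALWAYS), so the chip always
		   halts on the newest empty slot. */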
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (entry != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) yp->rx_ring[entry - 1].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) | yp->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void yellowfin_error(struct net_device *dev, int intr_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* Hmmmmm, it's not clear what to do here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int yellowfin_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct yellowfin_private *yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) void __iomem *ioaddr = yp->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (yellowfin_debug > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) ioread16(ioaddr + TxStatus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) ioread16(ioaddr + RxStatus),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ioread16(ioaddr + IntrStatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) yp->cur_tx, yp->dirty_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) yp->cur_rx, yp->dirty_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* Disable interrupts by clearing the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) iowrite16(0x0000, ioaddr + IntrEnb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Stop the chip's Tx and Rx processes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) iowrite32(0x80000000, ioaddr + RxCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) iowrite32(0x80000000, ioaddr + TxCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) del_timer(&yp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) #if defined(__i386__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (yellowfin_debug > 2) {
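		/* Legacy x86-only dump: the TxPtr/RxPtr comparisons and the
		   buffer peeks below treat bus (DMA) addresses as virtual
		   pointers, which at best approximated reality on old i386
		   systems; it is a debugging aid only. */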
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) printk(KERN_DEBUG " Tx ring at %08llx:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) (unsigned long long)yp->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) for (i = 0; i < TX_RING_SIZE*2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) for (i = 0; i < TX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) printk(KERN_DEBUG " Rx ring %08llx:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) (unsigned long long)yp->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) yp->rx_ring[i].result_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (yellowfin_debug > 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) printk(KERN_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) for (j = 0; j < 0x50; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pr_cont(" %04x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) #endif /* __i386__ debugging only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) free_irq(yp->pci_dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /* Free all the skbuffs in the Rx queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (yp->rx_skbuff[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dev_kfree_skb(yp->rx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) yp->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dev_kfree_skb(yp->tx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) yp->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (yellowfin_debug > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) bogus_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /* Set or clear the multicast filter for this adaptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static void set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) struct yellowfin_private *yp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) void __iomem *ioaddr = yp->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) u16 cfg_value = ioread16(ioaddr + Cnfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* Stop the Rx process to change any value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) iowrite16(0x000F, ioaddr + AddrMode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) } else if ((netdev_mc_count(dev) > 64) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) (dev->flags & IFF_ALLMULTI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /* Too many to filter well, or accept all multicasts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) iowrite16(0x000B, ioaddr + AddrMode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) u16 hash_table[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) memset(hash_table, 0, sizeof(hash_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* Due to a bug in the early chip versions, multiple filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) slots must be set for each address. */
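			/* The filter is a 64-bit hash table exposed as four
			   16-bit registers: bits 5:4 of the CRC-derived index
			   select the word, bits 3:0 the bit within it. */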
			if (yp->drv_flags & HasMulticastBug) {
				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= 1 << (bit & 15);
				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= 1 << (bit & 15);
				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= 1 << (bit & 15);
			}
			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
			hash_table[bit >> 4] |= 1 << (bit & 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* Copy the hash table to the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) iowrite16(0x0003, ioaddr + AddrMode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) } else { /* Normal, unicast/broadcast-only mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) iowrite16(0x0001, ioaddr + AddrMode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) /* Restart the Rx process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct yellowfin_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) strlcpy(info->version, DRV_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static const struct ethtool_ops ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .get_drvinfo = yellowfin_get_drvinfo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct yellowfin_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) void __iomem *ioaddr = np->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct mii_ioctl_data *data = if_mii(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) switch(cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) case SIOCGMIIPHY: /* Get address of MII PHY in use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) data->phy_id = np->phys[0] & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) case SIOCGMIIREG: /* Read MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) case SIOCSMIIREG: /* Write MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (data->phy_id == np->phys[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) u16 value = data->val_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) switch (data->reg_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Check for autonegotiation on or reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) np->medialock = (value & 0x9000) ? 0 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (np->medialock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) np->full_duplex = (value & 0x0100) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) case 4: np->advertising = value; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* Perhaps check_duplex(dev), depending on chip semantics. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static void yellowfin_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct yellowfin_private *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) BUG_ON(!dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) np->tx_status_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) np->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) np->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) unregister_netdev (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) pci_iounmap(pdev, np->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) pci_release_regions (pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) free_netdev (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static struct pci_driver yellowfin_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) .id_table = yellowfin_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) .probe = yellowfin_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) .remove = yellowfin_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static int __init yellowfin_init (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
/* When built as a module, this is printed whether or not probe finds any devices. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) printk(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return pci_register_driver(&yellowfin_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static void __exit yellowfin_cleanup (void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) pci_unregister_driver (&yellowfin_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) module_init(yellowfin_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) module_exit(yellowfin_cleanup);