/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

*/

#define DRV_NAME	"fealnx"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type, etc. */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability. */
/* The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for compile efficiency. */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask. */
/* Making the Tx ring too large decreases the effectiveness of channel */
/* bonding and packet priority. */
/* There are no ill effects from too-large receive rings. */
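/* Illustrative note (not in the original sources): with a power-of-two
   ring such as 16, an index advance like "idx = (idx + 1) % 16" reduces
   to "idx = (idx + 1) & 15"; the sizes chosen below (6 and 12) are not
   powers of two, so no such mask is available to the compiler. */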
// 88-12-9 modify,
// #define TX_RING_SIZE	16
// #define RX_RING_SIZE	32
#define TX_RING_SIZE	6
#define RX_RING_SIZE	12
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct fealnx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))
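/* Illustrative use of RUN_AT() (a sketch, not code quoted from this
 * driver; the media and reset timers are armed when the interface is
 * brought up):
 *
 *	np->timer.expires = RUN_AT(3 * HZ);	// fire ~3 seconds from now
 *	add_timer(&np->timer);
 */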

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE = 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};
/* Bits in the interrupt status/enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH = 0x02000000,		/* enhanced mode (name?) */
	CR_W_FD = 0x00100000,		/* full duplex */
	CR_W_PS10 = 0x00080000,		/* 10 mbit */
	CR_W_TXEN = 0x00040000,		/* tx enable (name?) */
	CR_W_PS1000 = 0x00010000,	/* 1000 mbit */
/*	CR_W_RXBURSTMASK = 0x00000e00,	I'm unsure about this */
	CR_W_RXMODEMASK = 0x000000e0,
	CR_W_PROM = 0x00000080,		/* promiscuous mode */
	CR_W_AB = 0x00000040,		/* accept broadcast */
	CR_W_AM = 0x00000020,		/* accept multicast */
	CR_W_ARP = 0x00000008,		/* receive runt pkt */
	CR_W_ALP = 0x00000004,		/* receive long pkt */
	CR_W_SEP = 0x00000002,		/* receive error pkt */
	CR_W_RXEN = 0x00000001,		/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP = 0x04000000,	/* tx stopped (name?) */
	CR_R_FD = 0x00100000,		/* full duplex detected */
	CR_R_PS10 = 0x00080000,		/* 10 mbit detected */
	CR_R_RXSTOP = 0x00008000,	/* rx stopped (name?) */
};
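/* Illustrative composition of a TCRRCR value from the write-side bits
 * above (a sketch, not a quote from the driver body): entering
 * promiscuous mode clears the Rx-mode field and sets the accept bits:
 *
 *	crvalue = (crvalue & ~CR_W_RXMODEMASK) | CR_W_PROM | CR_W_AB | CR_W_AM;
 *	iowrite32(crvalue, ioaddr + TCRRCR);
 */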

/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNTPKT = 0x40,		/* runt packet received */
	LONGPKT = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};
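/* Illustrative decode of an Rx status word using the bits above (a
 * sketch; assumes, as is common, that the length field counts the
 * 4-byte CRC):
 *
 *	if (!(status & RXOWN) && !(status & ErrorSummary)) {
 *		int pkt_len = ((status & FLNGMASK) >> FLNGShift) - 4;
 *		// ... hand pkt_len bytes to the stack ...
 *	}
 */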

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};
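/* Illustrative Tx control word for a frame that fits a single buffer, so
 * the packet-size and buffer-size fields carry the same value (a sketch
 * using the bits above):
 *
 *	ctrl = TXIC | TXLD | TXFD | CRCEnable | PADEnable | RetryTxLC |
 *	       (skb->len << PKTSShift) | (skb->len << TBSShift);
 */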

/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ	0x00000000
#define MASK_MIIR_MII_WRITE	0x00000008
#define MASK_MIIR_MII_MDO	0x00000004
#define MASK_MIIR_MII_MDI	0x00000002
#define MASK_MIIR_MII_MDC	0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ		0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE	0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */
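/* Worked example of the frame encoding consumed by m80x_send_cmd_to_phy()
 * below: the 16-bit command is ST(2)|OP(2)|PHYAD(5)|REGAD(5)|TA(2),
 * shifted out MSB first.  Reading register 2 of the PHY at address 1:
 *
 *	OP_READ | (1 << 7) | (2 << 2)  ==  0x6000 | 0x0080 | 0x0008  ==  0x6088
 */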

/* ------------------------------------------------------------------------- */
/* Constants for Myson PHY */
/* ------------------------------------------------------------------------- */
#define MysonPHYID	0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0	0x0302
#define StatusRegister	18
#define SPEED100	0x0400	// bit10
#define FULLMODE	0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/* Constants for Seeq 80225 PHY */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0	0x0016

#define MIIRegister18	18
#define SPD_DET_100	0x80
#define DPLX_DET_FULL	0x40

/* ------------------------------------------------------------------------- */
/* Constants for Ahdoc 101 PHY */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0	0x0022

#define DiagnosticReg	18
#define DPLX_FULL	0x0800
#define Speed_100	0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/* Constants */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0	0x0141
#define LevelOnePHYID0	0x0013

#define MII1000BaseTControlReg	9
#define MII1000BaseTStatusReg	10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex	0x0200
#define PHYAbletoPerform1000HalfDuplex	0x0100
#define PHY1000AbilityMask		0x300

// for phy specific status register, marvell phy.
#define SpeedMask	0x0c000
#define Speed_1000M	0x08000
#define Speed_100M	0x4000
#define Speed_10M	0
#define Full_Duplex	0x2000
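/* Illustrative decode of the Marvell specific-status register (a sketch;
 * 'data' would come from mdio_read(dev, np->phys[0], SpecificReg)):
 *
 *	switch (data & SpeedMask) {
 *	case Speed_1000M: speed = 1000; break;
 *	case Speed_100M:  speed = 100;  break;
 *	default:          speed = 10;   break;	// Speed_10M
 *	}
 *	full_duplex = (data & Full_Duplex) != 0;
 */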

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M	0x08000
#define LXT1000_1000M	0x0c000
#define LXT1000_Full	0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp	0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* Number of MII PHYs found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;

	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;

	iowrite32(crvalue & ~(CR_W_RXEN + CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP + CR_R_TXSTOP))
		    == (CR_R_RXSTOP + CR_R_TXSTOP))
			break;
	}
}
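/* Typical pairing for the helpers above (illustrative; the real callers
 * appear later in the file): stop the receiver with the saved CR value,
 * update rings or filters, then restart by rewriting TCRRCR:
 *
 *	stop_nic_rx(np->mem, np->crvalue);
 *	... reload descriptors or rewrite MAR0/MAR1 ...
 *	iowrite32(np->crvalue, np->mem + TCRRCR);
 */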

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= fealnx_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int fealnx_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i)
		return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
		     phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					 "MII PHY found at address %d, status "
					 "0x%4.4x.\n", phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				 "MII PHY not found -- this device may "
				 "not operate correctly.\n");
	} else {
		np->phys[0] = 32;
		/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
		/* 89/6/13 add, (begin) */
//		if (np->PHYType == MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
		/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       dev->dev_addr, irq);

	return 0;

err_out_free_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
err_out_free_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}


static void fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
				  np->tx_ring_dma);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
				  np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);
	/* send it out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) mask = 0x8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) while (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /* low MDC, prepare MDO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (mask & data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) miir |= MASK_MIIR_MII_MDO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /* high MDC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) miir |= MASK_MIIR_MII_MDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) udelay(30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) if (mask == 0x2 && opcode == OP_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) miir &= ~MASK_MIIR_MII_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) return miir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static int mdio_read(struct net_device *dev, int phyad, int regad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) void __iomem *miiport = np->mem + MANAGEMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) ulong miir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) unsigned int mask, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /* read data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) mask = 0x8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) while (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* low MDC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) miir &= ~MASK_MIIR_MII_MDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* read MDI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) miir = ioread32(miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (miir & MASK_MIIR_MII_MDI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) data |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* high MDC, and wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) miir |= MASK_MIIR_MII_MDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) udelay(30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /* next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* low MDC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) miir &= ~MASK_MIIR_MII_MDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return data & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
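/* Bit-bang a clause 22 write frame followed by the 16 data bits,
   MSB first on MDO, and leave the bus idle with MDC low. */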
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) void __iomem *miiport = np->mem + MANAGEMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ulong miir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) unsigned int mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) /* write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) mask = 0x8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) while (mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /* low MDC, prepare MDO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (mask & data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) miir |= MASK_MIIR_MII_MDO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) /* high MDC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) miir |= MASK_MIIR_MII_MDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) /* next */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /* low MDC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) miir &= ~MASK_MIIR_MII_MDC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) iowrite32(miir, miiport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
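/* Bring the interface up: reset the chip, grab the (shared) IRQ,
   program the station address and the ring base addresses, configure
   BCR/TCRRCR for this host, enable rx/tx and interrupts, and arm the
   link-watch and reset timers. */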
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static int netdev_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) const int irq = np->pci_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) iowrite32(0x00000001, ioaddr + BCR); /* Reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < 3; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) iowrite16(((unsigned short*)dev->dev_addr)[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ioaddr + PAR0 + i*2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) init_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* Initialize other registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* Configure the PCI bus bursts and FIFO thresholds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 486: Set 8 longword burst.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 586: no burst limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) Burst length 5:3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 0 0 0 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 0 0 1 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 0 1 0 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 0 1 1 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 1 0 0 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 1 0 1 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 1 1 0 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 1 1 1 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) Wait the specified 50 PCI cycles after a reset by initializing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) Tx and Rx queues and the address filter list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) FIXME (Ueimor): optimistic for alpha + posted writes ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) np->bcrvalue = 0x10; /* little-endian, 8 burst length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) np->bcrvalue |= 0x04; /* big-endian */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) #if defined(__i386__) && !defined(MODULE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (boot_cpu_data.x86 <= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) np->crvalue = 0xa00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) np->crvalue = 0xe00; /* rx 128 burst length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) // 89/12/29 add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) // 90/1/16 modify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) // np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (np->pci_dev->device == 0x891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) np->bcrvalue |= 0x200; /* set PROG bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) np->crvalue |= CR_W_ENH; /* set enhanced bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) np->imrvalue |= ETI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) iowrite32(np->bcrvalue, ioaddr + BCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (dev->if_port == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dev->if_port = np->default_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) iowrite32(0, ioaddr + RXPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) // 89/9/1 modify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) // np->crvalue = 0x00e40001; /* tx store and forward, tx/rx enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) np->crvalue |= 0x00e40001; /* tx store and forward, tx/rx enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) np->mii.full_duplex = np->mii.force_media;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) getlinkstatus(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (np->linkok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) getlinktype(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) __set_rx_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* Clear and Enable interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) iowrite32(np->imrvalue, ioaddr + IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Set the timer to check for link beat. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) timer_setup(&np->timer, netdev_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) np->timer.expires = RUN_AT(3 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* timer handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) timer_setup(&np->reset_timer, reset_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) np->reset_timer_armed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static void getlinkstatus(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* function: Routine will read MII Status Register to get link status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /* input : dev... pointer to the adapter block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* output : none. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned int i, DelayTime = 0x1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) np->linkok = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (np->PHYType == MysonPHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) for (i = 0; i < DelayTime; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) np->linkok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) for (i = 0; i < DelayTime; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) np->linkok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
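/* Determine the negotiated speed and duplex. The Myson 3-in-1 PHY
   reports them in TCRRCR; external PHYs (SEEQ, Ahdoc, Marvell,
   Myson 981, Level One) each use a vendor-specific status register.
   The result is folded into np->crvalue for the next TCRRCR write. */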
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) static void getlinktype(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (np->PHYType == MysonPHY) { /* 3-in-1 case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (ioread32(np->mem + TCRRCR) & CR_R_FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) np->duplexmode = 2; /* full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) np->duplexmode = 1; /* half duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) np->line_speed = 1; /* 10M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) np->line_speed = 2; /* 100M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (np->PHYType == SeeqPHY) { /* this PHY is SEEQ 80225 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) unsigned int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) data = mdio_read(dev, np->phys[0], MIIRegister18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (data & SPD_DET_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) np->line_speed = 2; /* 100M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) np->line_speed = 1; /* 10M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (data & DPLX_DET_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) np->duplexmode = 2; /* full duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) np->duplexmode = 1; /* half duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) } else if (np->PHYType == AhdocPHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) unsigned int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) data = mdio_read(dev, np->phys[0], DiagnosticReg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (data & Speed_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) np->line_speed = 2; /* 100M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) np->line_speed = 1; /* 10M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (data & DPLX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) np->duplexmode = 2; /* full duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) np->duplexmode = 1; /* half duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /* 89/6/13 add, (begin) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) else if (np->PHYType == MarvellPHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unsigned int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) data = mdio_read(dev, np->phys[0], SpecificReg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (data & Full_Duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) np->duplexmode = 2; /* full duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) np->duplexmode = 1; /* half duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) data &= SpeedMask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (data == Speed_1000M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) np->line_speed = 3; /* 1000M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) else if (data == Speed_100M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) np->line_speed = 2; /* 100M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) np->line_speed = 1; /* 10M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* 89/6/13 add, (end) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* 89/7/27 add, (begin) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) else if (np->PHYType == Myson981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) unsigned int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) data = mdio_read(dev, np->phys[0], StatusRegister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (data & SPEED100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) np->line_speed = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) np->line_speed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (data & FULLMODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) np->duplexmode = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) np->duplexmode = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* 89/7/27 add, (end) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* 89/12/29 add */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) else if (np->PHYType == LevelOnePHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) data = mdio_read(dev, np->phys[0], SpecificReg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (data & LXT1000_Full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) np->duplexmode = 2; /* full duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) np->duplexmode = 1; /* half duplex mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) data &= SpeedMask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (data == LXT1000_1000M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) np->line_speed = 3; /* 1000M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) else if (data == LXT1000_100M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) np->line_speed = 2; /* 100M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) np->line_speed = 1; /* 10M */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (np->line_speed == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) np->crvalue |= CR_W_PS10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) else if (np->line_speed == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) np->crvalue |= CR_W_PS1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (np->duplexmode == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) np->crvalue |= CR_W_FD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* Take lock before calling this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static void allocate_rx_buffers(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* allocate skb for rx buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) while (np->really_rx_count != RX_RING_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) skb = netdev_alloc_skb(dev, np->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) break; /* Better luck next round. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) while (np->lack_rxbuf->skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) np->lack_rxbuf->skbuff = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) np->lack_rxbuf->status = RXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ++np->really_rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
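/* Periodic link watchdog (every 10s after the initial 3s kick): on a
   0 -> 1 link transition, redetect the media type and, if the control
   register value changed, restart rx/tx with the new settings. Also
   replenishes any rx buffers we failed to allocate earlier. */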
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static void netdev_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct netdev_private *np = from_timer(np, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct net_device *dev = np->mii.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) int old_crvalue = np->crvalue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) unsigned int old_linkok = np->linkok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ioread32(ioaddr + TCRRCR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (np->flags == HAS_MII_XCVR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) getlinkstatus(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if ((old_linkok == 0) && (np->linkok == 1)) { /* we need to detect the media type again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) getlinktype(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (np->crvalue != old_crvalue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) stop_nic_rxtx(ioaddr, np->crvalue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) iowrite32(np->crvalue, ioaddr + TCRRCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) allocate_rx_buffers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) np->timer.expires = RUN_AT(10 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Take lock before calling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /* Reset chip and disable rx, tx and interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static void reset_and_disable_rxtx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) int delay=51;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Reset the chip's Tx and Rx processes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) stop_nic_rxtx(ioaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Disable interrupts by clearing the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) iowrite32(0, ioaddr + IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Reset the chip to erase previous misconfiguration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) iowrite32(0x00000001, ioaddr + BCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) We surely wait too long (address+data phase). Who cares? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) while (--delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) ioread32(ioaddr + BCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /* Take lock before calling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* Restore chip after reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static void enable_rxtx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) reset_rx_descriptors(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ioaddr + TXLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ioaddr + RXLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) iowrite32(np->bcrvalue, ioaddr + BCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) iowrite32(0, ioaddr + RXPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) __set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* Clear and Enable interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) iowrite32(np->imrvalue, ioaddr + IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) iowrite32(0, ioaddr + TXPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
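/* Deferred reset, armed by the interrupt handler when it gives up
   after too much work: restore the saved CR/IMR values, reset the
   chip, re-enable rx/tx and restart the tx queue. */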
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) static void reset_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct netdev_private *np = from_timer(np, t, reset_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) struct net_device *dev = np->mii.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) np->crvalue = np->crvalue_sv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) np->imrvalue = np->imrvalue_sv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) reset_and_disable_rxtx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* works for me without this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) reset_tx_descriptors(dev); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) enable_rxtx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) np->reset_timer_armed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) printk(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) "%s: Transmit timed out, status %8.8x, resetting...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) dev->name, ioread32(ioaddr + ISR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) for (i = 0; i < RX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) printk(KERN_CONT " %8.8x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) (unsigned int) np->rx_ring[i].status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) printk(KERN_CONT "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) for (i = 0; i < TX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) printk(KERN_CONT "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) reset_and_disable_rxtx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) reset_tx_descriptors(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) enable_rxtx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) netif_wake_queue(dev); /* or .._start_.. ?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static void init_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /* initialize rx variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) np->cur_rx = &np->rx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) np->lack_rxbuf = np->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) np->really_rx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* initial rx descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) np->rx_ring[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) np->rx_ring[i].next_desc = np->rx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) (i + 1)*sizeof(struct fealnx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) np->rx_ring[i].skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* for the last rx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* allocate skb for rx buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) np->lack_rxbuf = &np->rx_ring[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) ++np->really_rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) np->rx_ring[i].skbuff = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) np->rx_ring[i].status = RXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) np->rx_ring[i].control |= RXIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* initialize tx variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) np->cur_tx = &np->tx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) np->cur_tx_copy = &np->tx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) np->really_tx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) np->free_tx_count = TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) np->tx_ring[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /* do we need np->tx_ring[i].control = XXX; ?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) np->tx_ring[i].next_desc = np->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) (i + 1)*sizeof(struct fealnx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) np->tx_ring[i].skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* for the last tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
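/* Queue one skb for transmission. With 'one_buffer' defined (the
   default below) each packet uses a single descriptor; the alternative
   'two_buffer' path would split packets longer than BPT bytes across
   two descriptors. A write to TXPDR then asks the chip to poll the
   tx ring. */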
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) spin_lock_irqsave(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) np->cur_tx_copy->skbuff = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) #define one_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) #define BPT 1022
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #if defined(one_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) // 89/12/29 add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (np->pci_dev->device == 0x891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) np->cur_tx_copy->control |= ETIControl | RetryTxLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) np->cur_tx_copy->status = TXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) --np->free_tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) #elif defined(two_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (skb->len > BPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct fealnx_desc *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) /* for the first descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) skb->data, BPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) /* for the last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) next = np->cur_tx_copy->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) next->skbuff = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) next->control = TXIC | TXLD | CRCEnable | PADEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) next->control |= (skb->len << PKTSShift); /* pkt size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) next->control |= ((skb->len - BPT) << TBSShift); /* buf size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) // 89/12/29 add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (np->pci_dev->device == 0x891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) np->cur_tx_copy->control |= ETIControl | RetryTxLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) next->buffer = dma_map_single(&ep->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) skb->data + BPT, skb->len - BPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) next->status = TXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) np->cur_tx_copy->status = TXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) np->cur_tx_copy = next->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) np->free_tx_count -= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) skb->data, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) // 89/12/29 add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (np->pci_dev->device == 0x891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) np->cur_tx_copy->control |= ETIControl | RetryTxLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) np->cur_tx_copy->status = TXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) --np->free_tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (np->free_tx_count < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ++np->really_tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) iowrite32(0, np->mem + TXPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* Take lock before calling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /* Chip probably hosed tx ring. Clean up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) static void reset_tx_descriptors(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct fealnx_desc *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /* initialize tx variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) np->cur_tx = &np->tx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) np->cur_tx_copy = &np->tx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) np->really_tx_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) np->free_tx_count = TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) cur = &np->tx_ring[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (cur->skbuff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) dma_unmap_single(&np->pci_dev->dev, cur->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) cur->skbuff->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) dev_kfree_skb_any(cur->skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) cur->skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) cur->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) cur->control = 0; /* needed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /* probably not needed. We do it for purely paranoid reasons */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) cur->next_desc = np->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) (i + 1)*sizeof(struct fealnx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) cur->next_desc_logical = &np->tx_ring[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* for the last tx descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /* Take lock and stop rx before calling this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static void reset_rx_descriptors(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct fealnx_desc *cur = np->cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) allocate_rx_buffers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (cur->skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) cur->status = RXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) cur = cur->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) np->mem + RXLBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /* The interrupt handler does all of the Rx thread work and cleans up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) after the Tx thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static irqreturn_t intr_handler(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct net_device *dev = (struct net_device *) dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) long boguscnt = max_interrupt_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) unsigned int num_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) spin_lock(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) iowrite32(0, ioaddr + IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) u32 intr_status = ioread32(ioaddr + ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* Acknowledge all of the current interrupt sources ASAP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) iowrite32(intr_status, ioaddr + ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (!(intr_status & np->imrvalue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) // 90/1/16 delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) // if (intr_status & FBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) // { /* fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) // stop_nic_tx(ioaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) // stop_nic_rx(ioaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) // break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) // };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (intr_status & TUNF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) iowrite32(0, ioaddr + TXPDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (intr_status & CNTOVF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* missed pkts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) dev->stats.rx_missed_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) ioread32(ioaddr + TALLY) & 0x7fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* crc error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) dev->stats.rx_crc_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (intr_status & (RI | RBU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (intr_status & RI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) netdev_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) stop_nic_rx(ioaddr, np->crvalue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) reset_rx_descriptors(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) iowrite32(np->crvalue, ioaddr + TCRRCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
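		/* Reclaim descriptors the chip has released (TXOWN
		   clear), updating the error or byte/collision counters
		   and freeing the mapped skbs. */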
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) while (np->really_tx_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) long tx_status = np->cur_tx->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) long tx_control = np->cur_tx->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct fealnx_desc *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) next = np->cur_tx->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) tx_status = next->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) tx_control = next->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (tx_status & TXOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (!(np->crvalue & CR_W_ENH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (tx_status & (CSL | LC | EC | UDF | HF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (tx_status & EC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (tx_status & CSL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (tx_status & LC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) dev->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (tx_status & UDF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if ((tx_status & HF) && np->mii.full_duplex == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) dev->stats.tx_heartbeat_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) dev->stats.tx_bytes +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ((tx_control & PKTSMask) >> PKTSShift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) dev->stats.collisions +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ((tx_status & NCRMask) >> NCRShift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) dev->stats.tx_bytes +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) ((tx_control & PKTSMask) >> PKTSShift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /* Free the original skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) dma_unmap_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) np->cur_tx->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) np->cur_tx->skbuff->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dev_consume_skb_irq(np->cur_tx->skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) np->cur_tx->skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) --np->really_tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (np->cur_tx->control & TXLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) np->cur_tx = np->cur_tx->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ++np->free_tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) np->cur_tx = np->cur_tx->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) np->cur_tx = np->cur_tx->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) np->free_tx_count += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) num_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) } /* end of for loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (num_tx && np->free_tx_count >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* read transmit status for enhanced mode only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (np->crvalue & CR_W_ENH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) long data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) data = ioread32(ioaddr + TSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) dev->stats.tx_errors += (data & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) dev->stats.tx_aborted_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) (data & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dev->stats.tx_window_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) (data & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) dev->stats.collisions += (data & 0x0000ffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (--boguscnt < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) printk(KERN_WARNING "%s: Too much work at interrupt, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) "status=0x%4.4x.\n", dev->name, intr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!np->reset_timer_armed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) np->reset_timer_armed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) np->reset_timer.expires = RUN_AT(HZ/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) add_timer(&np->reset_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) stop_nic_rxtx(ioaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) netif_stop_queue(dev);
				/* netif_tx_disable(dev) would be an alternative here */
				/* Prevent other paths from enabling tx, rx and
				   interrupts until the reset timer has run. */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN);	/* clearing just the enable bits suffices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) np->imrvalue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
	/* read the tally counters */
	/* missed packets, reported in the low 15 bits of TALLY */
	dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* CRC errors, reported in bits 30..16 of TALLY */
	dev->stats.rx_crc_errors +=
		(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dev->name, ioread32(ioaddr + ISR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) iowrite32(np->imrvalue, ioaddr + IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) spin_unlock(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
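
/*
 * Illustrative decode of the enhanced-mode TSR word read in the
 * interrupt handler above. The field boundaries follow the masks used
 * there; the struct and function names are examples only, not part of
 * the driver or the datasheet.
 */
struct fealnx_tsr_fields {
	unsigned int aborted;		/* bits 31..24 */
	unsigned int out_of_window;	/* bits 23..16 */
	unsigned int collisions;	/* bits 15..0 */
};

static inline struct fealnx_tsr_fields fealnx_decode_tsr(u32 data)
{
	struct fealnx_tsr_fields f = {
		.aborted	= (data & 0xff000000) >> 24,
		.out_of_window	= (data & 0x00ff0000) >> 16,
		.collisions	= data & 0x0000ffff,
	};

	return f;
}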
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) /* This routine is logically part of the interrupt handler, but separated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) for clarity and better register allocation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static int netdev_rx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) /* If EOP is set on the next entry, it's a new packet. Send it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) s32 rx_status = np->cur_rx->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (np->really_rx_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", rx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) (rx_status & ErrorSummary)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (rx_status & ErrorSummary) { /* there was a fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) "%s: Receive error, Rx status %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) dev->name, rx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dev->stats.rx_errors++; /* end of a packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (rx_status & (LONGPKT | RUNTPKT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) if (rx_status & RXER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (rx_status & CRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) int need_to_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) int desno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
				if (rx_status & RXFSD) {	/* this packet is too long; it spans more than one rx buffer */
					struct fealnx_desc *cur;

					/* check whether the packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN)) &&
						    (cur->status & RXLSD))
							break;
						/* go to the next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else		/* no descriptor with RXLSD was found; something is wrong */
					need_to_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (need_to_reset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
					/* free all rx descriptors belonging to this long packet */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: rx descriptor with no skb\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) np->cur_rx->status = RXOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) np->cur_rx = np->cur_rx->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) continue;
				} else {	/* rx error; the chip needs a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) stop_nic_rx(ioaddr, np->crvalue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) reset_rx_descriptors(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) iowrite32(np->crvalue, ioaddr + TCRRCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) break; /* exit the while loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) } else { /* this received pkt is ok */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) struct sk_buff *skb;
			/* Omit the four-octet CRC from the length. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) #ifndef final_version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) if (debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) " status %x.\n", pkt_len, rx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) /* Check if the packet is long enough to accept without copying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) to a minimally-sized skbuff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (pkt_len < rx_copybreak &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) skb_reserve(skb, 2); /* 16 byte align the IP header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) dma_sync_single_for_cpu(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) np->cur_rx->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) DMA_FROM_DEVICE);
				/* Copy the frame data into the new skb. */
#if !defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb, np->cur_rx->skbuff->data,
					pkt_len);
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) dma_sync_single_for_device(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) np->cur_rx->buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb = np->cur_rx->skbuff;
				skb_put(skb, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) dev->stats.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) np->cur_rx = np->cur_rx->next_desc_logical;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) } /* end of while loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) /* allocate skb for rx buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) allocate_rx_buffers(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
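
/*
 * Minimal sketch of the copy-vs-flip receive policy used in
 * netdev_rx() above, based on the module-wide rx_copybreak setting:
 * frames shorter than rx_copybreak are copied into a freshly
 * allocated skb so the large ring buffer can be reused at once;
 * longer frames hand the ring skb itself to the stack and the slot
 * is refilled later by allocate_rx_buffers(). The helper name is
 * illustrative only.
 */
static inline int fealnx_rx_should_copy(int pkt_len)
{
	return pkt_len < rx_copybreak;
}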
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static struct net_device_stats *get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
	/* The chip only needs to report frames it silently dropped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) dev->stats.rx_missed_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) ioread32(ioaddr + TALLY) & 0x7fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) dev->stats.rx_crc_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
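
/*
 * For reference, the TALLY register layout implied by get_stats() and
 * the interrupt handler (illustrative helper, not used by the driver):
 * bits 14..0 count silently missed frames and bits 30..16 count CRC
 * errors, so both tallies arrive in a single 32-bit read.
 */
static inline void fealnx_decode_tally(u32 tally, unsigned int *missed,
				       unsigned int *crc_errs)
{
	*missed = tally & 0x7fff;
	*crc_errs = (tally & 0x7fff0000) >> 16;
}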
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
/* for the ndo_set_rx_mode callback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) static void set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) {
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(&np->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) /* Take lock before calling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) static void __set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) u32 mc_filter[2]; /* Multicast hash filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) u32 rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) memset(mc_filter, 0xff, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) (dev->flags & IFF_ALLMULTI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /* Too many to match, or accept all multicasts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) memset(mc_filter, 0xff, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) rx_mode = CR_W_AB | CR_W_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) memset(mc_filter, 0, sizeof(mc_filter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) unsigned int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			mc_filter[bit >> 5] |= 1 << (bit & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) rx_mode = CR_W_AB | CR_W_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) stop_nic_rxtx(ioaddr, np->crvalue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) iowrite32(mc_filter[0], ioaddr + MAR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) iowrite32(mc_filter[1], ioaddr + MAR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) np->crvalue &= ~CR_W_RXMODEMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) np->crvalue |= rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) iowrite32(np->crvalue, ioaddr + TCRRCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
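
/*
 * Illustrative helper showing how __set_rx_mode() derives a filter
 * bit for one multicast address: the top six bits of the Ethernet
 * CRC, inverted, select one of the 64 bits spread across MAR0/MAR1.
 * The function name is an example only.
 */
static inline unsigned int fealnx_mc_filter_bit(const u8 *addr)
{
	return (ether_crc(ETH_ALEN, addr) >> 26) ^ 0x3F;
}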
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static int netdev_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) mii_ethtool_get_link_ksettings(&np->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static int netdev_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static int netdev_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return mii_nway_restart(&np->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) static u32 netdev_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) return mii_link_ok(&np->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static u32 netdev_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) return debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) static void netdev_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) debug = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static const struct ethtool_ops netdev_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) .get_drvinfo = netdev_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) .nway_reset = netdev_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) .get_link = netdev_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) .get_msglevel = netdev_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) .set_msglevel = netdev_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) .get_link_ksettings = netdev_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) .set_link_ksettings = netdev_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) };
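
/*
 * These hooks back the standard ethtool interface; for example,
 * "ethtool eth0" ends up in netdev_get_link_ksettings() and
 * "ethtool -r eth0" in netdev_nway_reset(). The ksettings hooks take
 * np->lock because they share MII state with the interrupt handler.
 * (Command lines are shown for illustration only.)
 */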
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) spin_lock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) spin_unlock_irq(&np->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
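
/*
 * Userspace sketch (illustrative, error handling omitted) of the path
 * handled by mii_ioctl() above: reading the PHY's BMSR via
 * SIOCGMIIREG on an AF_INET socket. The interface name "eth0" and
 * the descriptor "skfd" are assumptions.
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(skfd, SIOCGMIIPHY, &ifr);		(fills in mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(skfd, SIOCGMIIREG, &ifr);		(BMSR in mii->val_out)
 */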
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static int netdev_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) void __iomem *ioaddr = np->mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) /* Disable interrupts by clearing the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) iowrite32(0x0000, ioaddr + IMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* Stop the chip's Tx and Rx processes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) stop_nic_rxtx(ioaddr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) del_timer_sync(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) del_timer_sync(&np->reset_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) free_irq(np->pci_dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /* Free all the skbuffs in the Rx queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) struct sk_buff *skb = np->rx_ring[i].skbuff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) np->rx_ring[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) dma_unmap_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) np->rx_ring[i].buffer, np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) np->rx_ring[i].skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct sk_buff *skb = np->tx_ring[i].skbuff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) dma_unmap_single(&np->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) np->tx_ring[i].buffer, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) np->tx_ring[i].skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
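
/*
 * Teardown order in netdev_close() above: mask interrupts, stop the
 * DMA engines, delete the timers, release the IRQ, then unmap and
 * free both rings. Reordering these steps could let a late interrupt
 * or timer touch freed buffers.
 */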
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static const struct pci_device_id fealnx_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {} /* terminate list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
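
/*
 * The table above lets userspace autoload the module by modalias:
 * vendor 0x1516 is Myson Technology, and the final field of each
 * entry is the driver_data index passed back to fealnx_init_one().
 * Device 0x0800, for instance, matches the pattern
 * "pci:v00001516d00000800sv*sd*bc*sc*i*".
 */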
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static struct pci_driver fealnx_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) .name = "fealnx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) .id_table = fealnx_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) .probe = fealnx_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) .remove = fealnx_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static int __init fealnx_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) return pci_register_driver(&fealnx_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) static void __exit fealnx_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) pci_unregister_driver(&fealnx_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) module_init(fealnx_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) module_exit(fealnx_exit);