// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

/*
 * e100.c: Intel(R) PRO/100 ethernet driver
 *
 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
 * original e100 driver, but better described as a munging of
 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 * References:
 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
 * Open Source Software Developers Manual,
 * http://sourceforge.net/projects/e1000
 *
 *
 * Theory of Operation
 *
 * I. General
 *
 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 * controller family, which includes the 82557, 82558, 82559, 82550,
 * 82551, and 82562 devices. 82558 and greater controllers
 * integrate the Intel 82555 PHY. The controllers are used in
 * server and client network interface cards, as well as in
 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 * configurations. 8255x supports a 32-bit linear addressing
 * mode and operates at a 33 MHz PCI clock rate.
 *
 * II. Driver Operation
 *
 * Memory-mapped mode is used exclusively to access the device's
 * shared-memory structure, the Control/Status Registers (CSR). All
 * setup, configuration, and control of the device, including queuing
 * of Tx, Rx, and configuration commands, is done through the CSR.
 * cmd_lock serializes accesses to the CSR command register. cb_lock
 * protects the shared Command Block List (CBL).
 *
 * The 8255x is highly MII-compliant, and all access to the PHY goes
 * through the Management Data Interface (MDI). Consequently, the
 * driver leverages the mii.c library shared with other MII-compliant
 * devices.
 *
 * Big- and Little-Endian byte order as well as 32- and 64-bit
 * archs are supported. Weak-ordered memory and non-cache-coherent
 * archs are supported.
 *
 * III. Transmit
 *
 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
 * together in a fixed-size ring (CBL) thus forming the flexible mode
 * memory structure. A TCB marked with the suspend-bit indicates
 * the end of the ring. The last TCB processed suspends the
 * controller, and the controller can be restarted by issuing a CU
 * resume command to continue from the suspend point, or a CU start
 * command to start at a given position in the ring.
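 *
 * As a minimal sketch of that restart decision (illustrative only;
 * cu_suspended is a placeholder condition, and the real logic,
 * including the failed-resume bookkeeping around cb_to_send, lives
 * in e100_exec_cb() and e100_tx_clean() later in this file):
 *
 *	if (cu_suspended)
 *		e100_exec_cmd(nic, cuc_resume, 0);
 *	else
 *		e100_exec_cmd(nic, cuc_start, cb->dma_addr);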
 *
 * Non-Tx commands (config, multicast setup, etc) are linked
 * into the CBL ring along with Tx commands. The common structure
 * used for both Tx and non-Tx commands is the Command Block (CB).
 *
 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
 * is the next CB to check for completion; cb_to_send is the first
 * CB to start on in case of a previous failure to resume. CB clean
 * up happens in interrupt context in response to a CU interrupt.
 * cbs_avail keeps track of the number of free CB resources available.
 *
 * Hardware padding of short packets to the minimum packet size is
 * enabled. The 82557 pads with 7Eh, while the later controllers pad
 * with 00h.
 *
 * IV. Receive
 *
 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
 * Descriptors (RFDs), each followed by a data buffer, thus forming
 * the simplified mode memory structure. Rx skbs are allocated to
 * contain both the RFD and the data buffer, but the RFD is pulled
 * off before the skb is indicated. The data buffer is aligned such
 * that encapsulated protocol headers are u32-aligned. Since the RFD
 * is part of the mapped shared memory, and completion status is
 * contained within the RFD, the RFD must be dma_sync'ed to maintain
 * a consistent view from software and hardware.
 *
 * In order to keep updates to the RFD link field from colliding with
 * hardware writes to mark packets complete, we use the feature that
 * hardware will not write to a size 0 descriptor and mark the previous
 * packet as end-of-list (EL). After updating the link, we remove EL
 * and only then restore the size such that hardware may use the
 * previous-to-end RFD.
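 *
 * As a sketch of that ordering (names illustrative; the real Rx
 * allocation path later in this file pairs each step with the
 * appropriate dma_sync call):
 *
 *	put_unaligned_le32(new_rfd_dma, &tail_rfd->link);
 *	prev_rfd->command &= ~cpu_to_le16(cb_el);
 *	tail_rfd->size = cpu_to_le16(RFD_BUF_LEN);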
 *
 * Under typical operation, the receive unit (RU) is started once,
 * and the controller happily fills RFDs as frames arrive. If
 * replacement RFDs cannot be allocated, or the RU goes non-active,
 * the RU must be restarted. Frame arrival generates an interrupt,
 * and Rx indication and re-allocation happen in the same context,
 * therefore no locking is required. A software-generated interrupt
 * is raised from the watchdog to recover from a failed-allocation
 * scenario where all Rx resources have been indicated and none
 * replaced.
 *
 * V. Miscellaneous
 *
 * VLAN offloading of tagging, stripping and filtering is not
 * supported, but the driver will accommodate the extra 4-byte VLAN
 * tag for processing by upper layers. Tx/Rx checksum offloading is
 * not supported. Tx scatter/gather is not supported. Jumbo frames
 * are not supported (hardware limitation).
 *
 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 * Thanks to JC (jchapman@katalix.com) for helping with
 * testing/troubleshooting the development driver.
 *
 * TODO:
 * o several entry points race with dev->close
 * o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 * FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 * - Stratus87247: protect MDI control register manipulations
 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>


#define DRV_NAME "e100"
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16

#define FIRMWARE_D101M "e100/d101m_ucode.bin"
#define FIRMWARE_D101S "e100/d101s_ucode.bin"
#define FIRMWARE_D102E "e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

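/* Match on vendor/device ID, require the PCI Ethernet class as a
 * sanity check, and stash the ICH generation (0 = not an ICH part) in
 * driver_data so probe can apply the ICH-specific workarounds. */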
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

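/* MAC steppings; apart from mac_unknown, the values match the PCI
 * revision ID that each controller stepping reports. */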
enum mac {
	mac_82557_D100_A = 0,
	mac_82557_D100_B = 1,
	mac_82557_D100_C = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M = 8,
	mac_82559_D101S = 9,
	mac_82550_D102 = 12,
	mac_82550_D102_C = 13,
	mac_82551_E = 14,
	mac_82551_F = 15,
	mac_82551_10 = 16,
	mac_unknown = 0xFF,
};

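/* Known PHY identifiers as assembled during PHY probe: the MII
 * PHYSID2 register in the high word, PHYSID1 in the low word. */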
enum phy {
	phy_100a = 0x000003E0,
	phy_100c = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v = 0xd061004d,
	phy_unknown = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready = 0x10,
	rus_mask = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours = 0x00,
	stat_ack_sw_gen = 0x04,
	stat_ack_rnr = 0x10,
	stat_ack_cu_idle = 0x20,
	stat_ack_frame_rx = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all = 0x01,
	irq_sw_gen = 0x02,
};

enum scb_cmd_lo {
	cuc_nop = 0x00,
	ruc_start = 0x01,
	ruc_load_base = 0x06,
	cuc_start = 0x10,
	cuc_resume = 0x20,
	cuc_dump_addr = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset = 0x0000,
	selftest = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read = 0x08000000,
	mdi_ready = 0x10000000,
};
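
/* A single MDI transaction through this register, as a minimal sketch
 * (illustrative only; phy_addr, reg and data are placeholders, and the
 * real helpers with the locking this register needs are mdio_ctrl_hw()
 * and friends later in this file):
 *
 *	iowrite32(mdi_read | phy_addr << 21 | reg << 16, &nic->csr->mdi_ctrl);
 *	while (!(ioread32(&nic->csr->mdi_ctrl) & mdi_ready))
 *		cpu_relax();
 *	data = ioread32(&nic->csr->mdi_ctrl) & 0xFFFF;
 */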

enum eeprom_op {
	op_write = 0x05,
	op_read = 0x06,
	op_ewds = 0x10,
	op_ewen = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix = 0x03,
	eeprom_phy_iface = 0x06,
	eeprom_id = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok = 0x2000,
};

/*
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
 */
enum cb_command {
	cb_nop = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi = 0x0003,
	cb_tx = 0x0004,
	cb_ucode = 0x0005,
	cb_dump = 0x0006,
	cb_tx_sf = 0x0008,
	cb_tx_nc = 0x0010,
	cb_cid = 0x1f00,
	cb_i = 0x2000,
	cb_s = 0x4000,
	cb_el = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
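/* For a pair of adjacent bitfields, X() keeps the declaration order
 * LSB-first under both bitfield layouts, so e.g. X(byte_count:6, pad0:2)
 * always places byte_count in the low six bits of config byte 0 and the
 * on-wire layout matches the datasheet regardless of endianness. */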
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable ____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs ____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock ____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich = (1 << 0),
		promiscuous = (1 << 1),
		multicast_all = (1 << 2),
		wol_magic = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags ____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct dma_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * the device off the PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);
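
	/* Each command word is clocked out MSB-first from bit 31. As a
	 * worked example (values only, not driver state): with
	 * addr_len == 8, writing data 0x1234 to addr 0x0A gives
	 * ((op_write << 8 | 0x0A) << 16) | 0x1234 = 0x050A1234. */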

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* The eeprom drives a dummy zero to EEDO after receiving
		 * the complete address. Use this to adjust addr_len. */
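		/* Worked example (values only, not driver state): a
		 * 64-word part has 6 address bits, so when probed with
		 * *addr_len == 8 the dummy zero appears two clocks early,
		 * at i == 18, and *addr_len is trimmed by 2 to 6. */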
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
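	/* Worked example: if words 0 through wc-2 sum to 0x1234, a healthy
	 * part stores 0xBABA - 0x1234 = 0xA886 in the last word. */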
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) #define E100_WAIT_SCB_FAST 20 /* delay like the old code */
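/* Rough worst case implied by the constants above: 20 fast polls, then
 * about (20000 - 20) * 5us of udelay() in the loop below, i.e. ~100ms,
 * which is what the E100_WAIT_SCB_TIMEOUT comment warns about.
 */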
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) spin_lock_irqsave(&nic->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /* Previous command is accepted when SCB clears */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (unlikely(i > E100_WAIT_SCB_FAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (unlikely(cmd != cuc_resume))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) iowrite8(cmd, &nic->csr->scb.cmd_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) spin_unlock_irqrestore(&nic->cmd_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) spin_lock_irqsave(&nic->cb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (unlikely(!nic->cbs_avail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) cb = nic->cb_to_use;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) nic->cb_to_use = cb->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) nic->cbs_avail--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) cb->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) err = cb_prepare(nic, cb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (unlikely(!nic->cbs_avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* Order is important otherwise we'll be in a race with h/w:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * set S-bit in current first, then clear S-bit in previous. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) cb->command |= cpu_to_le16(cb_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) cb->prev->command &= cpu_to_le16(~cb_s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) while (nic->cb_to_send != nic->cb_to_use) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) nic->cb_to_send->dma_addr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* Ok, here's where things get sticky. It's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * possible that we can't schedule the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * because the controller is too busy, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * let's just queue the command and try again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * when another command is scheduled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (err == -ENOSPC) {
				/* request a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) schedule_work(&nic->tx_timeout_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) nic->cuc_cmd = cuc_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) nic->cb_to_send = nic->cb_to_send->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) spin_unlock_irqrestore(&nic->cb_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
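
/* Why the S-bit ordering above matters (informal sketch): if the
 * previous CB's suspend bit were cleared first, the CU could finish
 * that CB and run into the new one before its own suspend bit was set,
 * and would then run off the end of the chain. Setting the S-bit on
 * the new tail first guarantees the CU always stops on a suspended CB.
 */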
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static int mdio_read(struct net_device *netdev, int addr, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* the standard mdio_ctrl() function for usual MII-compliant hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) u32 data_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * Stratus87247: we shouldn't be writing the MDI control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * register until the Ready bit shows True. Also, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * manipulation of the MDI control registers is a multi-step
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * procedure it should be done under lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) spin_lock_irqsave(&nic->mdio_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) for (i = 100; i; --i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (unlikely(!i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) spin_unlock_irqrestore(&nic->mdio_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return 0; /* No way to indicate timeout error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) spin_unlock_irqrestore(&nic->mdio_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dir == mdi_read ? "READ" : "WRITE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) addr, reg, data, data_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return (u16)data_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
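
/* Field layout implied by the iowrite32() above (see the 8255x manual
 * for the authoritative definition): bits 15:0 carry the data, bits
 * 20:16 the register number, bits 25:21 the PHY address, and dir
 * supplies the read/write opcode; mdi_ready is then polled until the
 * transaction completes.
 */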
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) u32 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) u32 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) u32 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if ((reg == MII_BMCR) && (dir == mdi_write)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) MII_ADVERTISE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Workaround Si issue where sometimes the part will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * autoneg to 100Mbps even when advertised.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (advert & ADVERTISE_100FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) data |= BMCR_SPEED100 | BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) else if (advert & ADVERTISE_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) data |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return mdio_ctrl_hw(nic, addr, dir, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* Fully software-emulated mdio_ctrl() function for cards without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * MII-compliant PHYs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * For now, this is mainly geared towards 80c24 support; in case of further
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * requirements for other types (i82503, ...?) either extend this mechanism
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * or split it, whichever is cleaner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) u32 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) u32 dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) u32 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
	/* We may eventually need a driver-private (netdev_priv'ed) register
	 * array to record state changes, but for now fully hardcoded
	 * register handling should be sufficient. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (dir == mdi_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) case MII_BMCR:
			/* Emulate auto-negotiation enabled, full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return BMCR_ANENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) case MII_BMSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return BMSR_LSTATUS /* for mii_link_ok() */ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) BMSR_ANEGCAPABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) BMSR_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case MII_ADVERTISE:
			/* The 80c24 is a 10 Mbps-only "combo card" PHY;
			 * advertise 10BASE-T half/full duplex only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return ADVERTISE_10HALF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) ADVERTISE_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dir == mdi_read ? "READ" : "WRITE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) addr, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dir == mdi_read ? "READ" : "WRITE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) addr, reg, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) return 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static inline int e100_phy_supports_mii(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
	/* For now, infer MII support from whether we are using the
	 * software-emulated MII mdio_ctrl().
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static void e100_get_defaults(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (nic->mac == mac_unknown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) nic->mac = mac_82557_D100_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) nic->params.rfds = rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) nic->params.cbs = cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /* Quadwords to DMA into FIFO before starting frame transmit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) nic->tx_threshold = 0xE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* no interrupt for every tx completion, delay = 256us if not 557 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Template for a freshly allocated RFD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) nic->blank_rfd.command = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* MII setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) nic->mii.phy_id_mask = 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) nic->mii.reg_num_mask = 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) nic->mii.dev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) nic->mii.mdio_read = mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) nic->mii.mdio_write = mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct config *config = &cb->u.config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) u8 *c = (u8 *)config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) struct net_device *netdev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) cb->command = cpu_to_le16(cb_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) memset(config, 0, sizeof(struct config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) config->byte_count = 0x16; /* bytes in this struct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) config->direct_rx_dma = 0x1; /* reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) config->standard_tcb = 0x1; /* 1=standard, 0=extended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) config->tx_underrun_retry = 0x3; /* # of underrun retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (e100_phy_supports_mii(nic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) config->pad10 = 0x6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) config->ifs = 0x6; /* x16 = inter frame spacing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) config->pad15_1 = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) config->pad15_2 = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) config->fc_delay_hi = 0x40; /* time delay for fc frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) config->tx_padding = 0x1; /* 1=pad short frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) config->pad18 = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) config->pad20_1 = 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) config->pad21_1 = 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) config->adaptive_ifs = nic->adaptive_ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) config->loopback = nic->loopback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (nic->mii.force_media && nic->mii.full_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) config->full_duplex_force = 0x1; /* 1=force, 0=auto */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (nic->flags & promiscuous || nic->loopback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) config->promiscuous_mode = 0x1; /* 1=on, 0=off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (unlikely(netdev->features & NETIF_F_RXFCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (nic->flags & multicast_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) config->multicast_all = 0x1; /* 1=accept, 0=no */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* disable WoL when up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) config->magic_packet_disable = 0x1; /* 1=off, 0=on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (nic->mac >= mac_82558_D101_A4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) config->mwi_enable = 0x1; /* 1=enable, 0=disable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) config->standard_tcb = 0x0; /* 1=standard, 0=extended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (nic->mac >= mac_82559_D101M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) config->tno_intr = 0x1; /* TCO stats enable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Enable TCO in extended config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (nic->mac >= mac_82551_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) config->byte_count = 0x20; /* extended bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) config->rx_d102_mode = 0x1; /* GMRC for TCO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) config->standard_stat_counter = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (netdev->features & NETIF_F_RXALL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) c + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) c + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) c + 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
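
/* Size note for the configure block above: byte_count 0x16 selects the
 * 22-byte standard block; the extended 0x20 (32-byte) block is enabled
 * only for 82551_10 and later parts that need the D102 receive mode
 * bit.
 */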
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) /*************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * CPUSaver parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * All CPUSaver parameters are 16-bit literals that are part of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * "move immediate value" instruction. By changing the value of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * the literal in the instruction before the code is loaded, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * driver can change the algorithm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * INTDELAY - This loads the dead-man timer with its initial value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * When this timer expires the interrupt is asserted, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) * timer is reset each time a new packet is received. (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * BUNDLEMAX below to set the limit on number of chained packets)
 * The current default is 0x600 or 1536.  Experiments show that
 * the value should probably stay within the 0x200 - 0x1000 range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * BUNDLEMAX -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * This sets the maximum number of frames that will be bundled. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * some situations, such as the TCP windowing algorithm, it may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) * better to limit the growth of the bundle size than let it go as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * high as it can, because that could cause too much added latency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * The default is six, because this is the number of packets in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * default TCP window size. A value of 1 would make CPUSaver indicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * an interrupt for every frame received. If you do not want to put
 * a limit on the bundle size, set this value to 0xFFFF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * BUNDLESMALL -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * This contains a bit-mask describing the minimum size frame that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) * will be bundled. The default masks the lower 7 bits, which means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * that any frame less than 128 bytes in length will not be bundled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * but will instead immediately generate an interrupt. This does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * not affect the current bundle in any way. Any frame that is 128
 * bytes or larger will be bundled normally.  This feature is meant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * to provide immediate indication of ACK frames in a TCP environment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * Customers were seeing poor performance when a machine with CPUSaver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * enabled was sending but not receiving. The delay introduced when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * the ACKs were received was enough to reduce total throughput, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * the sender would sit idle until the ACK was finally seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * The current default is 0xFF80, which masks out the lower 7 bits.
 * This means that any frame which is 0x7F (127) bytes or smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * will cause an immediate interrupt. Because this value must be a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * bit mask, there are only a few valid values that can be used. To
 * turn this feature off, the driver can write the value 0xFFFF to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * lower word of this instruction (in the same way that the other
 * parameters are used).  Likewise, a value of 0xF800 (which treats
 * frames up to 2047 bytes as small) would cause an interrupt to be
 * generated for every frame, because all standard Ethernet frames
 * are <= 2047 bytes in length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) *************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* if you wish to disable the ucode functionality, while maintaining the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * workarounds it provides, set the following defines to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * BUNDLESMALL 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * BUNDLEMAX 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * INTDELAY 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) #define BUNDLESMALL 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) #define BUNDLEMAX (u16)6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) #define INTDELAY (u16)1536 /* 0x600 */
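
/* Worked example of the BUNDLESMALL mask (illustrative): with the
 * 0xFF80 default described above, a 127-byte frame gives
 * 127 & 0xFF80 == 0 and raises an immediate interrupt, while a
 * 128-byte frame gives 0x0080 != 0 and is bundled.  With BUNDLESMALL
 * set to 1 here, the driver instead writes 0xFFFF (bundle everything).
 */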
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
/* Request and validate the microcode image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) static const struct firmware *e100_request_firmware(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) const char *fw_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) const struct firmware *fw = nic->fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) u8 timer, bundle, min_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) bool required = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* do not load u-code for ICH devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (nic->flags & ich)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* Search for ucode match against h/w revision
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * Based on comments in the source code for the FreeBSD fxp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * "fixes for bugs in the B-step hardware (specifically, bugs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * with Inline Receive)."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * So we must fail if it cannot be loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * The other microcode files are only required for the optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * CPUSaver feature. Nice to have, but no reason to fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (nic->mac == mac_82559_D101M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) fw_name = FIRMWARE_D101M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) } else if (nic->mac == mac_82559_D101S) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) fw_name = FIRMWARE_D101S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) fw_name = FIRMWARE_D102E;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) required = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) } else { /* No ucode on other devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* If the firmware has not previously been loaded, request a pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * to it. If it was previously loaded, we are reinitializing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * adapter, possibly in a resume from hibernate, in which case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * request_firmware() cannot be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (!fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) err = request_firmware(&fw, fw_name, &nic->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) netif_err(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) "Failed to load firmware \"%s\": %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) fw_name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) netif_info(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) "CPUSaver disabled. Needs \"%s\": %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) fw_name, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (fw->size != UCODE_SIZE * 4 + 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) netif_err(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) "Firmware \"%s\" has wrong size %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) fw_name, fw->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /* Read timer, bundle and min_size from end of firmware blob */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) timer = fw->data[UCODE_SIZE * 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) bundle = fw->data[UCODE_SIZE * 4 + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) min_size = fw->data[UCODE_SIZE * 4 + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) min_size >= UCODE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) netif_err(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) fw_name, timer, bundle, min_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* OK, firmware is validated and ready to use. Save a pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * to it in the nic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) nic->fw = fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) return fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) static int e100_setup_ucode(struct nic *nic, struct cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) const struct firmware *fw = (void *)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) u8 timer, bundle, min_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* It's not a real skb; we just abused the fact that e100_exec_cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) will pass it through to here... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) cb->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /* firmware is stored as little endian already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Read timer, bundle and min_size from end of firmware blob */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) timer = fw->data[UCODE_SIZE * 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) bundle = fw->data[UCODE_SIZE * 4 + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) min_size = fw->data[UCODE_SIZE * 4 + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* Insert user-tunable settings in cb->u.ucode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) cb->command = cpu_to_le16(cb_ucode | cb_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
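
/* Illustrative example of the literal patching above (hypothetical
 * word value): if cb->u.ucode[timer] held 0xABCD1111, masking with
 * 0xFFFF0000 and OR-ing in INTDELAY (0x600) yields 0xABCD0600 -- only
 * the 16-bit immediate changes, the rest of the instruction word is
 * preserved.
 */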
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static inline int e100_load_ucode_wait(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) int err = 0, counter = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct cb *cb = nic->cb_to_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) fw = e100_request_firmware(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* If it's NULL, then no ucode is required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (IS_ERR_OR_NULL(fw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return PTR_ERR_OR_ZERO(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) netif_err(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) "ucode cmd failed with error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /* must restart cuc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) nic->cuc_cmd = cuc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* wait for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) e100_write_flush(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* wait for possibly (ouch) 500ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) while (!(cb->status & cpu_to_le16(cb_complete))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) msleep(10);
		if (!--counter)
			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /* ack any interrupts, something could have been set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) iowrite8(~0, &nic->csr->scb.stat_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* if the command failed, or is not OK, notify and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) netif_err(nic, probe, nic->netdev, "ucode load failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) err = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) cb->command = cpu_to_le16(cb_iaaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) cb->command = cpu_to_le16(cb_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) offsetof(struct mem, dump_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static int e100_phy_check_without_mii(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) u8 phy_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) int without_mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) switch (phy_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) case I82503: /* Non-MII PHY; UNTESTED! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) case S80C24: /* Non-MII PHY; tested and working */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * doesn't have a programming interface of any sort. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * media is sensed automatically based on how the link partner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * is configured. This is, in essence, manual configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) netif_info(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) "found MII-less i82503 or 80c24 or other PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) /* these might be needed for certain MII-less cards...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * nic->flags |= ich;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * nic->flags |= ich_10h_workaround; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) without_mii = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) without_mii = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return without_mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) #define NCONFIG_AUTO_SWITCH 0x0080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) #define MII_NSC_CONG MII_RESV1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) #define NSC_CONG_ENABLE 0x0100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) #define NSC_CONG_TXREADY 0x0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) #define ADVERTISE_FC_SUPPORTED 0x0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static int e100_phy_init(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) struct net_device *netdev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) u16 bmcr, stat, id_lo, id_hi, cong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) for (addr = 0; addr < 32; addr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (addr == 32) {
		/* Uh-oh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * Do this only AFTER the MII check, since it relies on EEPROM
		 * values which may easily be unreliable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (e100_phy_check_without_mii(nic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return 0; /* simply return and hope for the best */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* for unknown cases log a fatal error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) netif_err(nic, hw, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) "Failed to locate any known PHY, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) "phy_addr = %d\n", nic->mii.phy_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /* Get phy ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) nic->phy = (u32)id_hi << 16 | (u32)id_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) "phy ID = 0x%08X\n", nic->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /* Select the phy and isolate the rest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) for (addr = 0; addr < 32; addr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (addr != nic->mii.phy_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) } else if (nic->phy != phy_82552_v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) bmcr = mdio_read(netdev, addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) mdio_write(netdev, addr, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) bmcr & ~BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * Workaround for 82552:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * Clear the ISOLATE bit on selected phy_id last (mirrored on all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * other phy_id's) using bmcr value from addr discovery loop above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (nic->phy == phy_82552_v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) bmcr & ~BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /* Handle National tx phys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /* Disable congestion control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) cong |= NSC_CONG_TXREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) cong &= ~NSC_CONG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (nic->phy == phy_82552_v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) /* assign special tweaked mdio_ctrl() function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* Workaround Si not advertising flow-control during autoneg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /* Reset for the above changes to take effect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) bmcr |= BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /* enable/disable MDI/MDI-X auto-switching. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
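^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * Hardware bring-up, in outline: reset the device, run the built-in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * self-test, initialize the PHY, then load CU and RU base addresses of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * zero so subsequent command and receive lists are addressed by plain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * bus addresses. Microcode load, configure, and individual-address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * setup are issued as command blocks, and the statistics dump area is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * registered last. Interrupts are left disabled here; e100_up()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) * enables them once NAPI polling is ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) */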
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static int e100_hw_init(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) e100_hw_reset(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if ((err = e100_self_test(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if ((err = e100_phy_init(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if ((err = e100_load_ucode_wait(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if ((err = e100_exec_cb(nic, NULL, e100_configure)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if ((err = e100_exec_cmd(nic, cuc_dump_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) nic->dma_addr + offsetof(struct mem, stats))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) e100_disable_irq(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
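^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * Build a multicast-setup command block: cb_multi carries a byte count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * followed by a packed array of 6-byte (ETH_ALEN) addresses. The list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * is truncated at E100_MAX_MULTICAST_ADDRS; overflow is instead handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) * by e100_set_multicast_list() below switching to multicast_all mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) */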
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) struct net_device *netdev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) cb->command = cpu_to_le16(cb_multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) netdev_for_each_mc_addr(ha, netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (i == count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static void e100_set_multicast_list(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) "mc_count=%d, flags=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) netdev_mc_count(netdev), netdev->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (netdev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) nic->flags |= promiscuous;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) nic->flags &= ~promiscuous;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (netdev->flags & IFF_ALLMULTI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) nic->flags |= multicast_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) nic->flags &= ~multicast_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) e100_exec_cb(nic, NULL, e100_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) e100_exec_cb(nic, NULL, e100_multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
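^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * Statistics use a dump/reset handshake: a prior cuc_dump_reset command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * asks the device to write its counters into nic->mem->stats and clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * them. Completion is signalled by a magic value (cuc_dump_reset_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * written at the end of the area the MAC supports; older MACs implement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * fewer counters, so the completion word sits earlier in struct stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * hence the revision-dependent 'complete' pointer below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) */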
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static void e100_update_stats(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct net_device *dev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct net_device_stats *ns = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct stats *s = &nic->mem->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) &s->complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Device's stats reporting may take several microseconds to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * complete, so we're always waiting for results of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * previous command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) *complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) nic->tx_frames = le32_to_cpu(s->tx_good_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ns->collisions += nic->tx_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) le32_to_cpu(s->tx_lost_crs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) nic->rx_short_frame_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) le32_to_cpu(s->rx_short_frame_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) ns->rx_length_errors = nic->rx_short_frame_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) nic->rx_over_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) le32_to_cpu(s->rx_alignment_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) le32_to_cpu(s->rx_short_frame_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) le32_to_cpu(s->rx_cdt_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) nic->tx_deferred += le32_to_cpu(s->tx_deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) nic->tx_single_collisions +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) le32_to_cpu(s->tx_single_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) nic->tx_multiple_collisions +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) le32_to_cpu(s->tx_multiple_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (nic->mac >= mac_82558_D101_A4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) nic->rx_fc_unsupported +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) le32_to_cpu(s->fc_rcv_unsupported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (nic->mac >= mac_82559_D101M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) nic->tx_tco_frames +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) le16_to_cpu(s->xmt_tco_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) nic->rx_tco_frames +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) le16_to_cpu(s->rcv_tco_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (e100_exec_cmd(nic, cuc_dump_reset, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) "exec cuc_dump_reset failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
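^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * Adaptive IFS, roughly: on a half-duplex link seeing more than one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * collision per 32 transmitted frames (with enough traffic for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * ratio to be meaningful), grow the inter-frame spacing in steps of 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * capped at 60; when traffic falls below the threshold, shrink it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * again. Any change is pushed to hardware with a configure command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */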
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) /* Adjust inter-frame-spacing (IFS) between two transmits if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * we're getting collisions on a half-duplex connection. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (duplex == DUPLEX_HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) u32 prev = nic->adaptive_ifs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if ((nic->tx_frames / 32 < nic->tx_collisions) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) (nic->tx_frames > min_frames)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (nic->adaptive_ifs < 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) nic->adaptive_ifs += 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) } else if (nic->tx_frames < min_frames) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (nic->adaptive_ifs >= 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) nic->adaptive_ifs -= 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (nic->adaptive_ifs != prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) e100_exec_cb(nic, NULL, e100_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static void e100_watchdog(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct nic *nic = from_timer(nic, t, watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) u32 speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) "right now = %ld\n", jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* mii library handles link maintenance tasks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) mii_ethtool_gset(&nic->mii, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) speed = ethtool_cmd_speed(&cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) speed == SPEED_100 ? 100 : 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) netdev_info(nic->netdev, "NIC Link is Down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) mii_check_link(&nic->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /* Software generated interrupt to recover from (rare) Rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * allocation failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * Unfortunately have to use a spinlock to not re-enable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * accidentally, due to hardware that shares a register between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * interrupt mask bit and the SW Interrupt generation bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) spin_lock_irq(&nic->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) &nic->csr->scb.cmd_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) e100_write_flush(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) spin_unlock_irq(&nic->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) e100_update_stats(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (nic->mac <= mac_82557_D100_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /* Issue a multicast command to work around a 557 lockup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) e100_set_multicast_list(nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) nic->flags |= ich_10h_workaround;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) nic->flags &= ~ich_10h_workaround;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) mod_timer(&nic->watchdog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
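^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * Prepare a Transmit Command Block for one skb: the TCB uses a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * Transmit Buffer Descriptor, stored inside the cb itself (tbd_array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * points back into the command block), covering the whole skb. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * driver advertises no scatter-gather, so skbs arrive here linear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) */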
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) cb->command = nic->tx_command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) dma_addr = pci_map_single(nic->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) skb->data, skb->len, PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) /* If we can't map the skb, have the upper layer try later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) * Use the last 4 bytes of the skb payload as the CRC; used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * testing, i.e. sending frames with a bad CRC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (unlikely(skb->no_fcs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) cb->command |= cpu_to_le16(cb_tx_nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) cb->command &= ~cpu_to_le16(cb_tx_nc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /* Request an interrupt every 16 packets regardless of the coalescing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * delay: (nic->cbs_avail & ~15) == nic->cbs_avail holds exactly when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * cbs_avail is a multiple of 16. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if ((nic->cbs_avail & ~15) == nic->cbs_avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) cb->command |= cpu_to_le16(cb_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) cb->u.tcb.tcb_byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) cb->u.tcb.threshold = nic->tx_threshold;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) cb->u.tcb.tbd_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) skb_tx_timestamp(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (nic->flags & ich_10h_workaround) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) Issue a NOP command followed by a 1us delay before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) issuing the Tx command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (e100_exec_cmd(nic, cuc_nop, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) "exec cuc_nop failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) err = e100_exec_cb(nic, skb, e100_xmit_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) case -ENOSPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* We queued the skb, but now we're out of space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) "No space for CB\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /* This is a hard error - log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) "Out of Tx resources, returning skb\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
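^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * Reclaim completed transmit command blocks: walk forward from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * cb_to_clean while the complete bit is set, unmap and free each skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * and return the cb to the available pool. The dma_rmb() orders the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * skb reads against the status read so a buffer is never freed while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * the device may still be using it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) */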
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) static int e100_tx_clean(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) struct net_device *dev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) int tx_cleaned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) spin_lock(&nic->cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /* Clean CBs marked complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) for (cb = nic->cb_to_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) cb->status & cpu_to_le16(cb_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) cb = nic->cb_to_clean = cb->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) dma_rmb(); /* read skb after status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) "cb[%d]->status = 0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) (int)(((void *)cb - (void *)nic->cbs) / sizeof(struct cb)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) cb->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (likely(cb->skb != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) dev->stats.tx_bytes += cb->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) pci_unmap_single(nic->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) le32_to_cpu(cb->u.tcb.tbd.buf_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) le16_to_cpu(cb->u.tcb.tbd.size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) dev_kfree_skb_any(cb->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) cb->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) tx_cleaned = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) cb->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) nic->cbs_avail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) spin_unlock(&nic->cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /* Recover from running out of Tx resources in xmit_frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) netif_wake_queue(nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return tx_cleaned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) static void e100_clean_cbs(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (nic->cbs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) while (nic->cbs_avail != nic->params.cbs.count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct cb *cb = nic->cb_to_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (cb->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) pci_unmap_single(nic->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) le32_to_cpu(cb->u.tcb.tbd.buf_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) le16_to_cpu(cb->u.tcb.tbd.size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) dev_kfree_skb(cb->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) nic->cb_to_clean = nic->cb_to_clean->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) nic->cbs_avail++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) nic->cbs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) nic->cbs_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) nic->cuc_cmd = cuc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
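^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * The command block list is a fixed-size ring carved from a DMA pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * Each cb carries both software links (next/prev pointers) and a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * hardware link: the little-endian bus address of the following cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * wrapping at the end, so the CU can chain from one command to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * next without CPU intervention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) */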
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) static int e100_alloc_cbs(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) struct cb *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) unsigned int i, count = nic->params.cbs.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) nic->cuc_cmd = cuc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) nic->cbs_avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) &nic->cbs_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) if (!nic->cbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) cb->link = cpu_to_le32(nic->cbs_dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) ((i+1) % count) * sizeof(struct cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) nic->cbs_avail = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (!nic->rxs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (RU_SUSPENDED != nic->ru_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /* handle init time starts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (!rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) rx = nic->rxs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /* (Re)start RU if suspended or idle and RFA is non-NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (rx->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) e100_exec_cmd(nic, ruc_start, rx->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) nic->ru_running = RU_RUNNING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
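^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * Receive uses the controller's simplified memory model: each Receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * Frame Descriptor lives in the headroom of its own skb, immediately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * followed by the data buffer, so one DMA mapping covers both. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * mapping is bidirectional because the CPU updates the link field of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * the previous RFD while the device writes status, size, and data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) */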
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /* Init, and map the RFD. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) dev_kfree_skb_any(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) rx->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) rx->dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) /* Link this RFD to the end of the RFA by pointing the previous RFD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * at it. Touching the previous RFD is safe because the next-to-last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * buffer's el bit keeps the hardware away from it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (rx->prev->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) static int e100_rx_indicate(struct nic *nic, struct rx *rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) unsigned int *work_done, unsigned int work_to_do)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) struct net_device *dev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) struct sk_buff *skb = rx->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) struct rfd *rfd = (struct rfd *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) u16 rfd_status, actual_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) u16 fcs_pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if (unlikely(work_done && *work_done >= work_to_do))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /* Need to sync before taking a peek at cb_complete bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) rfd_status = le16_to_cpu(rfd->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) "status=0x%04X\n", rfd_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dma_rmb(); /* read size after status bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* If data isn't ready, nothing to indicate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (unlikely(!(rfd_status & cb_complete))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /* If the next buffer has the el bit, but we think the receiver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * is still running, check to see if it really stopped while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * we had interrupts off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * This allows for a fast restart without re-enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if ((le16_to_cpu(rfd->command) & cb_el) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) (RU_RUNNING == nic->ru_running)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (ioread8(&nic->csr->scb.status) & rus_no_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) nic->ru_running = RU_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) sizeof(struct rfd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) return -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /* Get actual data size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (unlikely(dev->features & NETIF_F_RXFCS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) fcs_pad = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) actual_size = RFD_BUF_LEN - sizeof(struct rfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) /* Get data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) pci_unmap_single(nic->pdev, rx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /* If this buffer has the el bit, but we think the receiver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * is still running, check to see if it really stopped while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) * we had interrupts off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * This allows for a fast restart without re-enabling interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * This can happen when the RU sees the size change but also sees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * the el bit set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if ((le16_to_cpu(rfd->command) & cb_el) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) (RU_RUNNING == nic->ru_running)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (ioread8(&nic->csr->scb.status) & rus_no_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) nic->ru_running = RU_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /* Pull off the RFD and put the actual data (minus eth hdr) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) skb_reserve(skb, sizeof(struct rfd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) skb_put(skb, actual_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) skb->protocol = eth_type_trans(skb, nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /* If we are receiving all frames, then don't bother
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * checking for errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (unlikely(dev->features & NETIF_F_RXALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /* Received oversized frame, but keep it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) nic->rx_over_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) goto process_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (unlikely(!(rfd_status & cb_ok))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) /* Don't indicate if hardware indicates errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /* Don't indicate oversized frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) nic->rx_over_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) process_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) dev->stats.rx_bytes += (actual_size - fcs_pad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (work_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) (*work_done)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) rx->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
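^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * RFA maintenance, in outline: indicate completed buffers, refill the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * list, then advance the stopping point. The el (end-of-list) bit plus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * a zero size is kept on the next-to-last RFD so hardware always halts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * one buffer short of where software is relinking; the new stop mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * is written before the old one is cleared, so there is never a moment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * with no stop mark at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */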
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) unsigned int work_to_do)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct rx *rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) int restart_required = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) struct rx *old_before_last_rx, *new_before_last_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct rfd *old_before_last_rfd, *new_before_last_rfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /* Indicate newly arrived packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) err = e100_rx_indicate(nic, rx, work_done, work_to_do);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* Hit quota or no more to clean */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (-EAGAIN == err || -ENODATA == err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) /* On -EAGAIN we hit the quota and have more work to do; restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * once cleanup is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * Otherwise, if the receiver has already stopped for lack of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * resources, restart it now: the state machine must never start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * with a partially cleaned list, which would race hardware against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * rx_to_clean when in NAPI mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) restart_required = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) old_before_last_rx = nic->rx_to_use->prev->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /* Alloc new skbs to refill list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (unlikely(e100_rx_alloc_skb(nic, rx)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) break; /* Better luck next time (see watchdog) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) new_before_last_rx = nic->rx_to_use->prev->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (new_before_last_rx != old_before_last_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) /* Set the el-bit on the buffer that is before the last buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * This lets us update the next pointer on the last buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) * without worrying about hardware touching it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) * We set the size to 0 to prevent hardware from touching this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) * When the hardware reaches the next-to-last buffer with the el-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) * set and a size of 0, it raises an RNR interrupt and the RU status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) * goes to No Resources; it will neither complete nor write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) * this buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) new_before_last_rfd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) (struct rfd *)new_before_last_rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) new_before_last_rfd->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) new_before_last_rfd->command |= cpu_to_le16(cb_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) pci_dma_sync_single_for_device(nic->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) new_before_last_rx->dma_addr, sizeof(struct rfd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /* Now that we have a new stopping point, we can clear the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * stopping point. We must sync twice to get the proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * ordering on the hardware side of things. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) pci_dma_sync_single_for_device(nic->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) old_before_last_rx->dma_addr, sizeof(struct rfd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) + ETH_FCS_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) pci_dma_sync_single_for_device(nic->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) old_before_last_rx->dma_addr, sizeof(struct rfd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (restart_required) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /* Ack the pending RNR interrupt before restarting the receiver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) e100_start_receiver(nic, nic->rx_to_clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (work_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) (*work_done)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) static void e100_rx_clean_list(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) struct rx *rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) unsigned int i, count = nic->params.rfds.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) nic->ru_running = RU_UNINITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (nic->rxs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (rx->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) pci_unmap_single(nic->pdev, rx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) dev_kfree_skb(rx->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) kfree(nic->rxs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) nic->rxs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) nic->rx_to_use = nic->rx_to_clean = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) static int e100_rx_alloc_list(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) struct rx *rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) unsigned int i, count = nic->params.rfds.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) struct rfd *before_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) nic->rx_to_use = nic->rx_to_clean = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) nic->ru_running = RU_UNINITIALIZED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (e100_rx_alloc_skb(nic, rx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) e100_rx_clean_list(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) /* Set the el-bit on the buffer that is before the last buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * This lets us update the next pointer on the last buffer without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * worrying about hardware touching it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * We set the size to 0 to prevent hardware from touching this buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * When the hardware reaches the next-to-last buffer with the el-bit set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * and a size of 0, it raises an RNR interrupt and the RU goes into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * No Resources state; it will neither complete nor write to this buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) rx = nic->rxs->prev->prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) before_last = (struct rfd *)rx->skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) before_last->command |= cpu_to_le16(cb_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) before_last->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) nic->rx_to_use = nic->rx_to_clean = nic->rxs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) nic->ru_running = RU_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)
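^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * Interrupt entry: stat_ack reads as 0x00 when a shared interrupt line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * was raised by another device and as 0xFF when the hardware has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * removed (reads from an ejected device return all-ones); both cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * return IRQ_NONE. Everything else is acked with a single write and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * the real work is deferred to NAPI with interrupts masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) */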
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) static irqreturn_t e100_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) struct net_device *netdev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) "stat_ack = 0x%02X\n", stat_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) stat_ack == stat_ack_not_present) /* Hardware is ejected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /* Ack interrupt(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) iowrite8(stat_ack, &nic->csr->scb.stat_ack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) /* We hit Receive No Resource (RNR); restart RU after cleaning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (stat_ack & stat_ack_rnr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) nic->ru_running = RU_SUSPENDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (likely(napi_schedule_prep(&nic->napi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) e100_disable_irq(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) __napi_schedule(&nic->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) static int e100_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) struct nic *nic = container_of(napi, struct nic, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) unsigned int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) e100_rx_clean(nic, &work_done, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) e100_tx_clean(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /* If budget fully consumed, continue polling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) if (work_done == budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) return budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* only re-enable interrupt if stack agrees polling is really done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (likely(napi_complete_done(napi, work_done)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) e100_enable_irq(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) static void e100_netpoll(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) e100_disable_irq(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) e100_intr(nic->pdev->irq, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) e100_tx_clean(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) e100_enable_irq(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) static int e100_set_mac_address(struct net_device *netdev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) e100_exec_cb(nic, NULL, e100_setup_iaaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) static int e100_asf(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* ASF (Alert Standard Format) can be enabled from EEPROM: the device ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * must be in the 0x1050-0x1057 range, the ASF bit set, the GCL bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * clear, and the SMBus address valid (low byte != 0xFE). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static int e100_up(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if ((err = e100_rx_alloc_list(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) if ((err = e100_alloc_cbs(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) goto err_rx_clean_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) if ((err = e100_hw_init(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) goto err_clean_cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) e100_set_multicast_list(nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) e100_start_receiver(nic, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) mod_timer(&nic->watchdog, jiffies);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) nic->netdev->name, nic->netdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) goto err_no_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) netif_wake_queue(nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) napi_enable(&nic->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) /* Enable interrupts only after NAPI is enabled; otherwise the ISR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * could disable interrupts and schedule a poll that never runs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) e100_enable_irq(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) err_no_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) del_timer_sync(&nic->watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) err_clean_cbs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) e100_clean_cbs(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) err_rx_clean_list:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) e100_rx_clean_list(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
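/* Tear down in roughly the reverse order of e100_up(): quiesce NAPI
 * and the Tx queue, reset the hardware, then release the IRQ, the
 * watchdog, and both descriptor rings.
 */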
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) static void e100_down(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) /* wait here for poll to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) napi_disable(&nic->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) netif_stop_queue(nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) e100_hw_reset(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) free_irq(nic->pdev->irq, nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) del_timer_sync(&nic->watchdog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) netif_carrier_off(nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) e100_clean_cbs(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) e100_rx_clean_list(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /* Defer the reset to process context: the recovery path re-runs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * e100_up(), and request_irq() cannot be called in interrupt context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) schedule_work(&nic->tx_timeout_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
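/* Tx watchdog worker: under rtnl, bounce a running interface through
 * e100_down()/e100_up() to recover the hardware.
 */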
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static void e100_tx_timeout_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct nic *nic = container_of(work, struct nic, tx_timeout_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) struct net_device *netdev = nic->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (netif_running(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) e100_down(netdev_priv(netdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) e100_up(netdev_priv(netdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /* Use driver resources to perform internal MAC or PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) * loopback test. A single packet is prepared and transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * in loopback mode, and the test passes if the received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * packet compares byte-for-byte to the transmitted packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if ((err = e100_rx_alloc_list(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if ((err = e100_alloc_cbs(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) goto err_clean_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) /* ICH PHY loopback is broken so do MAC loopback instead */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (nic->flags & ich && loopback_mode == lb_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) loopback_mode = lb_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) nic->loopback = loopback_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if ((err = e100_hw_init(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) goto err_loopback_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) if (loopback_mode == lb_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) BMCR_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) e100_start_receiver(nic, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) goto err_loopback_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) skb_put(skb, ETH_DATA_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) memset(skb->data, 0xFF, ETH_DATA_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) e100_xmit_frame(skb, nic->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) skb->data, ETH_DATA_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) err_loopback_none:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) nic->loopback = lb_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) e100_clean_cbs(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) e100_hw_reset(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) err_clean_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) e100_rx_clean_list(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) #define MII_LED_CONTROL 0x1B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) #define E100_82552_LED_OVERRIDE 0x19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) #define E100_82552_LED_ON 0x000F /* LED_TX and LED_RX both on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) #define E100_82552_LED_OFF 0x000A /* LED_TX and LED_RX both off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static int e100_get_link_ksettings(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) mii_ethtool_get_link_ksettings(&nic->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) static int e100_set_link_ksettings(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) e100_exec_cb(nic, NULL, e100_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) static void e100_get_drvinfo(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) strlcpy(info->bus_info, pci_name(nic->pdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
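/* Register dump for 'ethtool -d': one word of SCB status/command, the
 * PHY registers, then the contents of the on-chip dump buffer appended
 * at the end.
 */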
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) #define E100_PHY_REGS 0x1D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static int e100_get_regs_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) /* We know the number of registers, and the size of the dump buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) * Calculate the total size in bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) static void e100_get_regs(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) struct ethtool_regs *regs, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) u32 *buff = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) regs->version = (1 << 24) | nic->pdev->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) ioread8(&nic->csr->scb.cmd_lo) << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) ioread16(&nic->csr->scb.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) for (i = 0; i < E100_PHY_REGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) /* Note that we read the registers in reverse order. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) * ordering is the ABI apparently used by ethtool and other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * applications.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) E100_PHY_REGS - 1 - i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) e100_exec_cb(nic, NULL, e100_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) sizeof(nic->mem->dump_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) !device_can_wakeup(&nic->pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (wol->wolopts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) nic->flags |= wol_magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) nic->flags &= ~wol_magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) e100_exec_cb(nic, NULL, e100_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) static u32 e100_get_msglevel(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return nic->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) static void e100_set_msglevel(struct net_device *netdev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) nic->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) static int e100_nway_reset(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) return mii_nway_restart(&nic->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) static u32 e100_get_link(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) return mii_link_ok(&nic->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) static int e100_get_eeprom_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) return nic->eeprom_wc << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
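/* The EEPROM is cached in nic->eeprom: reads ('ethtool -e') are served
 * from the cache, and writes ('ethtool -E ... magic 0x1234') update the
 * cache before flushing the touched words back to the part.
 */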
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) #define E100_EEPROM_MAGIC 0x1234
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) static int e100_get_eeprom(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct ethtool_eeprom *eeprom, u8 *bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) eeprom->magic = E100_EEPROM_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) static int e100_set_eeprom(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) struct ethtool_eeprom *eeprom, u8 *bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) if (eeprom->magic != E100_EEPROM_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) return e100_eeprom_save(nic, eeprom->offset >> 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) (eeprom->len >> 1) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) static void e100_get_ringparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) struct param_range *rfds = &nic->params.rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) struct param_range *cbs = &nic->params.cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ring->rx_max_pending = rfds->max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) ring->tx_max_pending = cbs->max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) ring->rx_pending = rfds->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) ring->tx_pending = cbs->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
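/* Resize the Rx (RFD) and Tx (CB) rings, e.g. for 'ethtool -G'. The
 * requested counts are clamped to the per-MAC limits, and a running
 * interface is bounced so the new ring sizes take effect.
 */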
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) static int e100_set_ringparam(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct param_range *rfds = &nic->params.rfds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct param_range *cbs = &nic->params.cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (ring->rx_mini_pending || ring->rx_jumbo_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) e100_down(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) rfds->count = max(ring->rx_pending, rfds->min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) rfds->count = min(rfds->count, rfds->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) cbs->count = max(ring->tx_pending, cbs->min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) cbs->count = min(cbs->count, cbs->max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) rfds->count, cbs->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) e100_up(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) "Link test (on/offline)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) "Eeprom test (on/offline)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) "Self test (offline)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) "Mac loopback (offline)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) "Phy loopback (offline)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
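/* Self-test backend for 'ethtool -t'. Link and EEPROM checksum run
 * online; the offline pass adds the hardware self-test plus MAC and
 * PHY loopback, saving and restoring link settings around it.
 */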
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) static void e100_diag_test(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) struct ethtool_test *test, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) struct ethtool_cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) memset(data, 0, E100_TEST_LEN * sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) data[0] = !mii_link_ok(&nic->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) data[1] = e100_eeprom_load(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (test->flags & ETH_TEST_FL_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) /* save speed, duplex & autoneg settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) mii_ethtool_gset(&nic->mii, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) e100_down(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) data[2] = e100_self_test(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) data[3] = e100_loopback_test(nic, lb_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) data[4] = e100_loopback_test(nic, lb_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /* restore speed, duplex & autoneg settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) mii_ethtool_sset(&nic->mii, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) e100_up(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) for (i = 0; i < E100_TEST_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) msleep_interruptible(4 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
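/* Blink the LEDs for 'ethtool -p'. The 82552 PHY has a dedicated LED
 * override register; older parts use MII register 0x1B. Returning 2
 * from ETHTOOL_ID_ACTIVE asks the ethtool core to drive the on/off
 * cycling at 2 Hz.
 */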
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) static int e100_set_phys_id(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) enum ethtool_phys_id_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) enum led_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) led_on = 0x01,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) led_off = 0x04,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) led_on_559 = 0x05,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) led_on_557 = 0x07,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) MII_LED_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) u16 leds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) case ETHTOOL_ID_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) case ETHTOOL_ID_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) case ETHTOOL_ID_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) case ETHTOOL_ID_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) "rx_length_errors", "rx_over_errors", "rx_crc_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) "tx_heartbeat_errors", "tx_window_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) /* device-specific stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) "tx_flow_control_pause", "rx_flow_control_pause",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) "rx_short_frame_errors", "rx_over_length_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) #define E100_NET_STATS_LEN 21 /* generic net_device stats strings above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) static int e100_get_sset_count(struct net_device *netdev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) case ETH_SS_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) return E100_TEST_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) return E100_STATS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
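/* Fill the ethtool statistics array: the first E100_NET_STATS_LEN
 * entries mirror the generic net_device stats, followed by the
 * device-specific counters in e100_gstrings_stats[] order.
 */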
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) static void e100_get_ethtool_stats(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) for (i = 0; i < E100_NET_STATS_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) data[i] = ((unsigned long *)&netdev->stats)[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) data[i++] = nic->tx_deferred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) data[i++] = nic->tx_single_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) data[i++] = nic->tx_multiple_collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) data[i++] = nic->tx_fc_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) data[i++] = nic->rx_fc_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) data[i++] = nic->rx_fc_unsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) data[i++] = nic->tx_tco_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) data[i++] = nic->rx_tco_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) data[i++] = nic->rx_short_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) data[i++] = nic->rx_over_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) switch (stringset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) case ETH_SS_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) static const struct ethtool_ops e100_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) .get_drvinfo = e100_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) .get_regs_len = e100_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) .get_regs = e100_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) .get_wol = e100_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) .set_wol = e100_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) .get_msglevel = e100_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) .set_msglevel = e100_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) .nway_reset = e100_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) .get_link = e100_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) .get_eeprom_len = e100_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) .get_eeprom = e100_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) .set_eeprom = e100_set_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) .get_ringparam = e100_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) .set_ringparam = e100_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) .self_test = e100_diag_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) .get_strings = e100_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) .set_phys_id = e100_set_phys_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) .get_ethtool_stats = e100_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) .get_sset_count = e100_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) .get_ts_info = ethtool_op_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) .get_link_ksettings = e100_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) .set_link_ksettings = e100_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
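/* Legacy SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls are delegated to
 * the generic MII helper.
 */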
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
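/* nic->mem (self-test results, statistics, dump buffer) must live in
 * coherent DMA memory so the device can write it directly.
 */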
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static int e100_alloc(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) &nic->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) return nic->mem ? 0 : -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) static void e100_free(struct nic *nic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (nic->mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) pci_free_consistent(nic->pdev, sizeof(struct mem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) nic->mem, nic->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) nic->mem = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) static int e100_open(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if ((err = e100_up(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) static int e100_close(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) e100_down(netdev_priv(netdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
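/* Toggle Rx FCS delivery (NETIF_F_RXFCS) and receive-all of errored
 * frames (NETIF_F_RXALL); both take effect by re-issuing the configure
 * command block.
 */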
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) static int e100_set_features(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) netdev_features_t changed = features ^ netdev->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) netdev->features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) e100_exec_cb(nic, NULL, e100_configure);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static const struct net_device_ops e100_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) .ndo_open = e100_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) .ndo_stop = e100_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) .ndo_start_xmit = e100_xmit_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) .ndo_set_rx_mode = e100_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) .ndo_set_mac_address = e100_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) .ndo_do_ioctl = e100_do_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) .ndo_tx_timeout = e100_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) .ndo_poll_controller = e100_netpoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) .ndo_set_features = e100_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
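/* PCI probe: allocate the netdev, map the CSR window (BAR 1 when using
 * I/O access, BAR 0 memory-mapped), reset the part, load defaults and
 * the EEPROM, and register the interface. The error labels unwind the
 * setup strictly in reverse.
 */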
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct nic *nic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) if (!(netdev = alloc_etherdev(sizeof(struct nic))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) netdev->hw_features |= NETIF_F_RXFCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) netdev->priv_flags |= IFF_SUPP_NOFCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) netdev->hw_features |= NETIF_F_RXALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) netdev->netdev_ops = &e100_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) netdev->ethtool_ops = &e100_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) nic->netdev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) nic->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) nic->msg_enable = (1 << debug) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) nic->mdio_ctrl = mdio_ctrl_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) pci_set_drvdata(pdev, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) if ((err = pci_enable_device(pdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) goto err_out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) if ((err = pci_request_regions(pdev, DRV_NAME))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) SET_NETDEV_DEV(netdev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (use_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (!nic->csr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (ent->driver_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) nic->flags |= ich;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) nic->flags &= ~ich;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) e100_get_defaults(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) if (nic->mac < mac_82558_D101_A4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) netdev->features |= NETIF_F_VLAN_CHALLENGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* locks must be initialized before calling hw_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) spin_lock_init(&nic->cb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) spin_lock_init(&nic->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) spin_lock_init(&nic->mdio_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) /* Reset the device before pci_set_master() in case device is in some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) * funky state and has an interrupt pending - hint: we don't have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) * interrupt handler registered yet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) e100_hw_reset(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) timer_setup(&nic->watchdog, e100_watchdog, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if ((err = e100_alloc(nic))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if ((err = e100_eeprom_load(nic)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) goto err_out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) e100_phy_init(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) if (!is_valid_ether_addr(netdev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (!eeprom_bad_csum_allow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) goto err_out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /* WoL magic packet can be enabled from EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if ((nic->mac >= mac_82558_D101_A4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) nic->flags |= wol_magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) device_set_wakeup_enable(&pdev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) /* ack any pending wake events, disable PME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) pci_pme_active(pdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) strcpy(netdev->name, "eth%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if ((err = register_netdev(netdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) goto err_out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) nic->cbs_pool = dma_pool_create(netdev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) &nic->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) nic->params.cbs.max * sizeof(struct cb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) if (!nic->cbs_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) goto err_out_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) netif_info(nic, probe, nic->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) "addr 0x%llx, irq %d, MAC addr %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) pdev->irq, netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) err_out_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) unregister_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) err_out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) e100_free(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) pci_iounmap(pdev, nic->csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) err_out_free_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) err_out_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) err_out_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) static void e100_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) if (netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) unregister_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) e100_free(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) pci_iounmap(pdev, nic->csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) dma_pool_destroy(nic->cbs_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
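/* Common suspend/shutdown path: detach and quiesce the interface, and
 * report via @enable_wake whether WoL magic-packet or ASF traffic
 * requires the part to stay powered. 82552 PHYs additionally arm
 * reverse auto-negotiation before sleeping.
 */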
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) struct nic *nic = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) netif_device_detach(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if (netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) e100_down(nic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if ((nic->flags & wol_magic) || e100_asf(nic)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /* enable reverse auto-negotiation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (nic->phy == phy_82552_v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) E100_82552_SMARTSPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) mdio_write(netdev, nic->mii.phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) E100_82552_SMARTSPEED, smartspeed |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) *enable_wake = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) *enable_wake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
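/* Final power-off: either arm PME and pick the deepest usable sleep
 * state via pci_prepare_to_sleep(), or drop to D3hot with wake
 * disabled.
 */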
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) static int __e100_power_off(struct pci_dev *pdev, bool wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) if (wake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) return pci_prepare_to_sleep(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) pci_wake_from_d3(pdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) pci_set_power_state(pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
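/* dev_pm_ops suspend hook: __e100_shutdown() is run for its side
 * effects; the PCI core performs the actual power-state transition, so
 * the computed wake decision is not used here.
 */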
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) static int __maybe_unused e100_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) bool wake;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) __e100_shutdown(to_pci_dev(dev_d), &wake);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
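/* Resume: re-enable the device and bus mastering, disarm the 82552
 * reverse auto-negotiation set up at suspend, then bring a running
 * interface back up and reattach it.
 */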
static int __maybe_unused e100_resume(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct nic *nic = netdev_priv(netdev);
	int err;

	err = pci_enable_device(to_pci_dev(dev_d));
	if (err) {
		netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
		return err;
	}
	pci_set_master(to_pci_dev(dev_d));

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
					   E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			   E100_82552_SMARTSPEED,
			   smartspeed & ~(E100_82552_REV_ANEG));
	}

	if (netif_running(netdev))
		e100_up(nic);

	netif_device_attach(netdev);

	return 0;
}

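/**
 * e100_shutdown - PCI .shutdown hook, called at reboot or power-off
 * @pdev: Pointer to PCI device
 *
 * Quiesce the device unconditionally; only on a full system power-off
 * is the device also transitioned to a low-power state, armed for
 * wakeup according to the decision made in __e100_shutdown().
 */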
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
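/*
 * Recovery flow (see Documentation/PCI/pci-error-recovery.rst): the
 * core first calls error_detected(); answering PCI_ERS_RESULT_NEED_RESET
 * requests a slot reset, after which slot_reset() re-initializes the
 * hardware and resume() restarts the interface.
 */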
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (PCI_FUNC(pdev->devfn) != 0)
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, false);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

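/*
 * SIMPLE_DEV_PM_OPS() maps both system sleep and hibernation onto the
 * same pair of handlers: it fills .suspend/.resume as well as the
 * .freeze/.thaw/.poweroff/.restore hibernation callbacks with
 * e100_suspend() and e100_resume().
 */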
static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);

static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = e100_remove,

	/* Power Management hooks */
	.driver.pm = &e100_pm_ops,

	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};

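/**
 * e100_init_module - register the driver with the PCI core
 *
 * The banner is printed only when the "debug" module parameter selects
 * a message level whose bitmap includes NETIF_MSG_DRV; (1 << debug) - 1
 * expands the level into a netif message bitmap the same way
 * netif_msg_init() does.  A hypothetical invocation enabling driver,
 * probe and link messages:
 *
 *	modprobe e100 debug=3
 */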
static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s\n", DRV_DESCRIPTION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);